summaryrefslogtreecommitdiff
path: root/chromium/third_party/dawn
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2020-07-16 11:45:35 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2020-07-17 08:59:23 +0000
commit552906b0f222c5d5dd11b9fd73829d510980461a (patch)
tree3a11e6ed0538a81dd83b20cf3a4783e297f26d91 /chromium/third_party/dawn
parent1b05827804eaf047779b597718c03e7d38344261 (diff)
downloadqtwebengine-chromium-552906b0f222c5d5dd11b9fd73829d510980461a.tar.gz
BASELINE: Update Chromium to 83.0.4103.122
Change-Id: Ie3a82f5bb0076eec2a7c6a6162326b4301ee291e Reviewed-by: Michael BrĂ¼ning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/third_party/dawn')
-rw-r--r--chromium/third_party/dawn/.gn7
-rw-r--r--chromium/third_party/dawn/BUILD.gn166
-rw-r--r--chromium/third_party/dawn/CMakeLists.txt149
-rw-r--r--chromium/third_party/dawn/DEPS29
-rw-r--r--chromium/third_party/dawn/OWNERS1
-rw-r--r--chromium/third_party/dawn/build_overrides/build.gni39
-rw-r--r--chromium/third_party/dawn/build_overrides/dawn.gni1
-rw-r--r--chromium/third_party/dawn/dawn.json181
-rw-r--r--chromium/third_party/dawn/dawn_wire.json9
-rw-r--r--chromium/third_party/dawn/docs/fuzzing.md26
-rw-r--r--chromium/third_party/dawn/docs/infra.md93
-rw-r--r--chromium/third_party/dawn/docs/testing.md66
-rw-r--r--chromium/third_party/dawn/examples/Animometer.cpp2
-rw-r--r--chromium/third_party/dawn/examples/CHelloTriangle.cpp37
-rw-r--r--chromium/third_party/dawn/examples/CMakeLists.txt44
-rw-r--r--chromium/third_party/dawn/examples/ComputeBoids.cpp4
-rw-r--r--chromium/third_party/dawn/examples/CppHelloTriangle.cpp2
-rw-r--r--chromium/third_party/dawn/examples/CubeReflection.cpp6
-rw-r--r--chromium/third_party/dawn/examples/ManualSwapChainTest.cpp362
-rw-r--r--chromium/third_party/dawn/examples/SampleUtils.cpp46
-rw-r--r--chromium/third_party/dawn/generator/CMakeLists.txt116
-rw-r--r--chromium/third_party/dawn/generator/dawn_json_generator.py41
-rw-r--r--chromium/third_party/dawn/generator/extract_json.py2
-rw-r--r--chromium/third_party/dawn/generator/generator_lib.py50
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn.h61
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn_native/ProcTable.cpp13
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn_native/wgpu_structs.cpp9
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn_native/wgpu_structs.h18
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn_proc.c4
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn_proc_table.h1
-rw-r--r--chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiProcs.cpp12
-rw-r--r--chromium/third_party/dawn/generator/templates/dawncpp.h54
-rw-r--r--chromium/third_party/dawn/generator/templates/library_webgpu_enum_tables.js35
-rw-r--r--chromium/third_party/dawn/generator/templates/mock_webgpu.cpp36
-rw-r--r--chromium/third_party/dawn/generator/templates/mock_webgpu.h13
-rw-r--r--chromium/third_party/dawn/generator/templates/opengl/OpenGLFunctionsBase.cpp2
-rw-r--r--chromium/third_party/dawn/generator/templates/webgpu.h35
-rw-r--r--chromium/third_party/dawn/generator/templates/webgpu_cpp.cpp15
-rw-r--r--chromium/third_party/dawn/generator/templates/webgpu_cpp.h17
-rw-r--r--chromium/third_party/dawn/generator/templates/webgpu_struct_info.json51
-rw-r--r--chromium/third_party/dawn/infra/config/global/cr-buildbucket.cfg4
-rw-r--r--chromium/third_party/dawn/scripts/dawn_features.gni31
-rw-r--r--chromium/third_party/dawn/scripts/dawn_overrides_with_defaults.gni4
-rwxr-xr-xchromium/third_party/dawn/scripts/perf_test_runner.py4
-rwxr-xr-xchromium/third_party/dawn/scripts/update_fuzzer_seed_corpus.sh98
-rw-r--r--chromium/third_party/dawn/src/Dummy.cpp18
-rw-r--r--chromium/third_party/dawn/src/common/Assert.cpp11
-rw-r--r--chromium/third_party/dawn/src/common/BUILD.gn31
-rw-r--r--chromium/third_party/dawn/src/common/CMakeLists.txt52
-rw-r--r--chromium/third_party/dawn/src/common/Compiler.h9
-rw-r--r--chromium/third_party/dawn/src/common/Constants.h7
-rw-r--r--chromium/third_party/dawn/src/common/GPUInfo.cpp36
-rw-r--r--chromium/third_party/dawn/src/common/GPUInfo.h39
-rw-r--r--chromium/third_party/dawn/src/common/LinkedList.h193
-rw-r--r--chromium/third_party/dawn/src/common/Log.cpp116
-rw-r--r--chromium/third_party/dawn/src/common/Log.h95
-rw-r--r--chromium/third_party/dawn/src/common/Math.cpp31
-rw-r--r--chromium/third_party/dawn/src/common/Math.h16
-rw-r--r--chromium/third_party/dawn/src/common/PlacementAllocated.h37
-rw-r--r--chromium/third_party/dawn/src/common/Result.h125
-rw-r--r--chromium/third_party/dawn/src/common/SlabAllocator.cpp249
-rw-r--r--chromium/third_party/dawn/src/common/SlabAllocator.h184
-rw-r--r--chromium/third_party/dawn/src/common/SystemUtils.cpp4
-rw-r--r--chromium/third_party/dawn/src/common/vulkan_platform.h216
-rw-r--r--chromium/third_party/dawn/src/common/windows_with_undefs.h1
-rw-r--r--chromium/third_party/dawn/src/common/xlib_with_undefs.h2
-rw-r--r--chromium/third_party/dawn/src/dawn/BUILD.gn10
-rw-r--r--chromium/third_party/dawn/src/dawn/CMakeLists.txt85
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Adapter.cpp8
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Adapter.h13
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BackendConnection.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BackendConnection.h6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroup.cpp144
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroup.h33
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h23
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp287
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h77
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindingInfo.h46
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Buffer.cpp45
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Buffer.h5
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CMakeLists.txt439
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp118
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandAllocator.h102
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp91
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandBuffer.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp52
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp73
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Commands.cpp96
-rw-r--r--chromium/third_party/dawn/src/dawn_native/DawnNative.cpp75
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Device.cpp150
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Device.h46
-rw-r--r--chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/EncodingContext.h9
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Error.cpp45
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Error.h101
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ErrorData.cpp25
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ErrorData.h15
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ErrorInjector.cpp70
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ErrorInjector.h68
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Fence.cpp21
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Fence.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Format.cpp82
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Format.h3
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Forward.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Instance.cpp46
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Instance.h25
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Pipeline.cpp13
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Pipeline.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp79
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp53
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Queue.cpp20
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Queue.h7
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RefCounted.cpp30
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp23
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp15
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h3
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Sampler.cpp14
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp613
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ShaderModule.h32
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Surface.cpp193
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Surface.h79
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Surface_metal.mm30
-rw-r--r--chromium/third_party/dawn/src/dawn_native/SwapChain.cpp278
-rw-r--r--chromium/third_party/dawn/src/dawn_native/SwapChain.h105
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Texture.cpp21
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Toggles.cpp157
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Toggles.h8
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp17
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp12
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp159
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h31
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp102
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h7
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp77
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h15
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp650
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h15
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp22
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h5
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp17
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp24
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp9
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp12
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocationD3D12.cpp49
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocationD3D12.h46
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp127
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h23
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.cpp17
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp70
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h47
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp34
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp8
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp3
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h1
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp278
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h60
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp116
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp8
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h5
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp56
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp206
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h80
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp14
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.h1
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp245
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h33
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm49
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h39
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm36
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.h36
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.mm34
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h11
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm548
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.h59
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.mm119
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm24
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h13
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm159
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/Forward.h9
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/MetalBackend.mm13
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm23
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm57
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.h3
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm17
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h11
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm182
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/StagingBufferMTL.mm2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h29
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm97
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h14
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm212
-rw-r--r--chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp121
-rw-r--r--chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h49
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp33
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp35
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h36
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp36
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h39
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp226
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp62
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h7
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/Forward.h10
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/OpenGLBackend.cpp2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.cpp6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp51
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp23
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp93
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp125
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h5
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp15
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp102
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.h1
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp73
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h15
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp60
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h15
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp15
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp194
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp228
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h11
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp25
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp12
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp10
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp57
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp37
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp7
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp91
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h3
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.cpp37
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.h32
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp82
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h78
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp178
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h11
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp14
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp8
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceOpaqueFD.cpp6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp6
-rw-r--r--chromium/third_party/dawn/src/dawn_platform/CMakeLists.txt22
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt57
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/WireClient.cpp9
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp6
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp5
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Device.cpp11
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Device.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ObjectStorage.h6
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/Server.cpp1
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/Server.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp14
-rw-r--r--chromium/third_party/dawn/src/fuzzers/BUILD.gn59
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h19
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/DawnNative.h56
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h14
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h24
-rw-r--r--chromium/third_party/dawn/src/include/dawn_wire/WireClient.h3
-rw-r--r--chromium/third_party/dawn/src/utils/BackendBinding.cpp29
-rw-r--r--chromium/third_party/dawn/src/utils/BackendBinding.h9
-rw-r--r--chromium/third_party/dawn/src/utils/CMakeLists.txt80
-rw-r--r--chromium/third_party/dawn/src/utils/GLFWUtils.cpp83
-rw-r--r--chromium/third_party/dawn/src/utils/GLFWUtils.h42
-rw-r--r--chromium/third_party/dawn/src/utils/GLFWUtils_metal.mm54
-rw-r--r--chromium/third_party/dawn/src/utils/ObjCUtils.h29
-rw-r--r--chromium/third_party/dawn/src/utils/ObjCUtils.mm25
-rw-r--r--chromium/third_party/dawn/src/utils/TextureFormatUtils.cpp89
-rw-r--r--chromium/third_party/dawn/src/utils/TextureFormatUtils.h58
-rw-r--r--chromium/third_party/dawn/src/utils/WGPUHelpers.cpp6
-rw-r--r--chromium/third_party/dawn/third_party/BUILD.gn12
-rw-r--r--chromium/third_party/dawn/third_party/CMakeLists.txt97
-rw-r--r--chromium/third_party/dawn/third_party/khronos/vulkan/vk_icd.h10
-rw-r--r--chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_android.h9
-rw-r--r--chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_core.h531
-rw-r--r--chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_fuchsia.h9
-rw-r--r--chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_ggp.h9
-rw-r--r--chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_ios.h9
-rw-r--r--chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_macos.h9
-rw-r--r--chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_metal.h9
-rw-r--r--chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_vi.h9
-rw-r--r--chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_wayland.h9
-rw-r--r--chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_win32.h13
-rw-r--r--chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xcb.h9
-rw-r--r--chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xlib.h9
-rw-r--r--chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xlib_xrandr.h9
304 files changed, 12874 insertions, 3464 deletions
diff --git a/chromium/third_party/dawn/.gn b/chromium/third_party/dawn/.gn
index 038d50ee8ee..c2127661ede 100644
--- a/chromium/third_party/dawn/.gn
+++ b/chromium/third_party/dawn/.gn
@@ -16,6 +16,13 @@ buildconfig = "//build/config/BUILDCONFIG.gn"
default_args = {
clang_use_chrome_plugins = false
+
+ # Override the mac version so standalone Dawn compiles with at least 10.11
+ # which allows us to not skip the -Wunguarded-availability warning and get
+ # proper warnings for use of APIs that are 10.12 and above (even if
+ # Chromium is still on 10.10).
+ mac_deployment_target = "10.11.0"
+ mac_min_system_version = "10.11.0"
}
check_targets = [
diff --git a/chromium/third_party/dawn/BUILD.gn b/chromium/third_party/dawn/BUILD.gn
index 88085a86092..ee3e71e5cf8 100644
--- a/chromium/third_party/dawn/BUILD.gn
+++ b/chromium/third_party/dawn/BUILD.gn
@@ -31,6 +31,11 @@ if (use_swiftshader) {
import("${dawn_swiftshader_dir}/src/Vulkan/vulkan.gni")
}
+# Import mac_min_system_version
+if (is_mac) {
+ import("//build/config/mac/mac_sdk.gni")
+}
+
###############################################################################
# dawn_platform
###############################################################################
@@ -60,9 +65,14 @@ config("libdawn_native_internal") {
# Suppress warnings that Metal isn't in the deployment target of Chrome:
# initialization of the Metal backend is behind a IsMetalSupported check so
# Dawn won't call Metal functions on macOS 10.10.
+ # At the time this is written Chromium supports 10.10.0 and above, so if we
+ # aren't on 10.11 it means we are on 10.11 and above, and Metal is available.
+ # Skipping this check on 10.11 and above is important as it allows getting
+ # proper compilation warning when using 10.12 and above feature for example.
# TODO(cwallez@chromium.org): Consider using API_AVAILABLE annotations on all
- # metal code in dawn once crbug.com/1004024 is sorted out.
- if (is_mac) {
+ # metal code in dawn once crbug.com/1004024 is sorted out if Chromium still
+ # supports 10.10 then.
+ if (is_mac && mac_min_system_version == "10.10.0") {
cflags_objcc = [ "-Wno-unguarded-availability" ]
}
}
@@ -161,9 +171,11 @@ source_set("libdawn_native_sources") {
"src/dawn_native/BackendConnection.h",
"src/dawn_native/BindGroup.cpp",
"src/dawn_native/BindGroup.h",
+ "src/dawn_native/BindGroupAndStorageBarrierTracker.h",
"src/dawn_native/BindGroupLayout.cpp",
"src/dawn_native/BindGroupLayout.h",
"src/dawn_native/BindGroupTracker.h",
+ "src/dawn_native/BindingInfo.h",
"src/dawn_native/BuddyAllocator.cpp",
"src/dawn_native/BuddyAllocator.h",
"src/dawn_native/BuddyMemoryAllocator.cpp",
@@ -198,6 +210,8 @@ source_set("libdawn_native_sources") {
"src/dawn_native/Error.h",
"src/dawn_native/ErrorData.cpp",
"src/dawn_native/ErrorData.h",
+ "src/dawn_native/ErrorInjector.cpp",
+ "src/dawn_native/ErrorInjector.h",
"src/dawn_native/ErrorScope.cpp",
"src/dawn_native/ErrorScope.h",
"src/dawn_native/ErrorScopeTracker.cpp",
@@ -252,6 +266,8 @@ source_set("libdawn_native_sources") {
"src/dawn_native/ShaderModule.h",
"src/dawn_native/StagingBuffer.cpp",
"src/dawn_native/StagingBuffer.h",
+ "src/dawn_native/Surface.cpp",
+ "src/dawn_native/Surface.h",
"src/dawn_native/SwapChain.cpp",
"src/dawn_native/SwapChain.h",
"src/dawn_native/Texture.cpp",
@@ -287,6 +303,8 @@ source_set("libdawn_native_sources") {
"src/dawn_native/d3d12/D3D12Error.h",
"src/dawn_native/d3d12/D3D12Info.cpp",
"src/dawn_native/d3d12/D3D12Info.h",
+ "src/dawn_native/d3d12/DescriptorHeapAllocationD3D12.cpp",
+ "src/dawn_native/d3d12/DescriptorHeapAllocationD3D12.h",
"src/dawn_native/d3d12/DescriptorHeapAllocator.cpp",
"src/dawn_native/d3d12/DescriptorHeapAllocator.h",
"src/dawn_native/d3d12/DeviceD3D12.cpp",
@@ -308,6 +326,8 @@ source_set("libdawn_native_sources") {
"src/dawn_native/d3d12/RenderPassBuilderD3D12.h",
"src/dawn_native/d3d12/RenderPipelineD3D12.cpp",
"src/dawn_native/d3d12/RenderPipelineD3D12.h",
+ "src/dawn_native/d3d12/ResidencyManagerD3D12.cpp",
+ "src/dawn_native/d3d12/ResidencyManagerD3D12.h",
"src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp",
"src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h",
"src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp",
@@ -316,6 +336,8 @@ source_set("libdawn_native_sources") {
"src/dawn_native/d3d12/SamplerD3D12.h",
"src/dawn_native/d3d12/ShaderModuleD3D12.cpp",
"src/dawn_native/d3d12/ShaderModuleD3D12.h",
+ "src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp",
+ "src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h",
"src/dawn_native/d3d12/StagingBufferD3D12.cpp",
"src/dawn_native/d3d12/StagingBufferD3D12.h",
"src/dawn_native/d3d12/SwapChainD3D12.cpp",
@@ -335,14 +357,22 @@ source_set("libdawn_native_sources") {
"Cocoa.framework",
"IOKit.framework",
"IOSurface.framework",
+ "QuartzCore.framework",
]
sources += [
+ "src/dawn_native/Surface_metal.mm",
"src/dawn_native/metal/BackendMTL.h",
"src/dawn_native/metal/BackendMTL.mm",
+ "src/dawn_native/metal/BindGroupLayoutMTL.h",
+ "src/dawn_native/metal/BindGroupLayoutMTL.mm",
+ "src/dawn_native/metal/BindGroupMTL.h",
+ "src/dawn_native/metal/BindGroupMTL.mm",
"src/dawn_native/metal/BufferMTL.h",
"src/dawn_native/metal/BufferMTL.mm",
"src/dawn_native/metal/CommandBufferMTL.h",
"src/dawn_native/metal/CommandBufferMTL.mm",
+ "src/dawn_native/metal/CommandRecordingContext.h",
+ "src/dawn_native/metal/CommandRecordingContext.mm",
"src/dawn_native/metal/ComputePipelineMTL.h",
"src/dawn_native/metal/ComputePipelineMTL.mm",
"src/dawn_native/metal/DeviceMTL.h",
@@ -385,6 +415,10 @@ source_set("libdawn_native_sources") {
sources += [
"src/dawn_native/opengl/BackendGL.cpp",
"src/dawn_native/opengl/BackendGL.h",
+ "src/dawn_native/opengl/BindGroupGL.cpp",
+ "src/dawn_native/opengl/BindGroupGL.h",
+ "src/dawn_native/opengl/BindGroupLayoutGL.cpp",
+ "src/dawn_native/opengl/BindGroupLayoutGL.h",
"src/dawn_native/opengl/BufferGL.cpp",
"src/dawn_native/opengl/BufferGL.h",
"src/dawn_native/opengl/CommandBufferGL.cpp",
@@ -525,11 +559,20 @@ source_set("libdawn_native_sources") {
}
if (use_swiftshader) {
- data_deps += [ "${dawn_swiftshader_dir}/src/Vulkan:icd_file" ]
- defines +=
- [ "DAWN_SWIFTSHADER_VK_ICD_JSON=\"${swiftshader_icd_file_name}\"" ]
+ data_deps += [
+ "${dawn_swiftshader_dir}/src/Vulkan:icd_file",
+ "${dawn_swiftshader_dir}/src/Vulkan:swiftshader_libvulkan",
+ ]
+ defines += [
+ "DAWN_ENABLE_SWIFTSHADER",
+ "DAWN_SWIFTSHADER_VK_ICD_JSON=\"${swiftshader_icd_file_name}\"",
+ ]
}
}
+
+ if (dawn_use_x11) {
+ libs += [ "X11" ]
+ }
}
# The static and shared libraries for libdawn_native. Most of the files are
@@ -651,10 +694,12 @@ dawn_component("libdawn_wire") {
# GLFW wrapping target
###############################################################################
+supports_glfw_for_windowing = is_win || (is_linux && !is_chromeos) || is_mac
+
# GLFW does not support ChromeOS, Android or Fuchsia, so provide a small mock
# library that can be linked into the Dawn tests on these platforms. Otherwise,
# use the real library from third_party/.
-if (is_win || (is_linux && !is_chromeos) || is_mac) {
+if (supports_glfw_for_windowing) {
group("dawn_glfw") {
public_deps = [
"third_party:glfw",
@@ -709,29 +754,49 @@ static_library("dawn_utils") {
"src/utils/SystemUtils.h",
"src/utils/TerribleCommandBuffer.cpp",
"src/utils/TerribleCommandBuffer.h",
+ "src/utils/TextureFormatUtils.cpp",
+ "src/utils/TextureFormatUtils.h",
"src/utils/Timer.h",
"src/utils/WGPUHelpers.cpp",
"src/utils/WGPUHelpers.h",
]
+ deps = [
+ ":libdawn_native",
+ ":libdawn_wire",
+ "${dawn_root}/src/common",
+ "${dawn_shaderc_dir}:libshaderc",
+ ]
+ libs = []
if (is_win) {
sources += [ "src/utils/WindowsTimer.cpp" ]
} else if (is_mac) {
- sources += [ "src/utils/OSXTimer.cpp" ]
+ sources += [
+ "src/utils/OSXTimer.cpp",
+ "src/utils/ObjCUtils.h",
+ "src/utils/ObjCUtils.mm",
+ ]
+ libs += [ "QuartzCore.framework" ]
} else {
sources += [ "src/utils/PosixTimer.cpp" ]
}
+ if (supports_glfw_for_windowing) {
+ sources += [
+ "src/utils/GLFWUtils.cpp",
+ "src/utils/GLFWUtils.h",
+ ]
+ deps += [ ":dawn_glfw" ]
+
+ if (dawn_enable_metal) {
+ sources += [ "src/utils/GLFWUtils_metal.mm" ]
+ libs += [ "Metal.framework" ]
+ }
+ }
+
public_deps = [
"${dawn_root}/src/dawn:dawncpp_headers",
]
-
- deps = [
- ":libdawn_native",
- ":libdawn_wire",
- "${dawn_root}/src/common",
- "${dawn_shaderc_dir}:libshaderc",
- ]
}
###############################################################################
@@ -771,8 +836,6 @@ test("dawn_unittests") {
"src/dawn_wire/client/ClientMemoryTransferService_mock.h",
"src/dawn_wire/server/ServerMemoryTransferService_mock.cpp",
"src/dawn_wire/server/ServerMemoryTransferService_mock.h",
- ]
- sources += [
"src/tests/unittests/BitSetIteratorTests.cpp",
"src/tests/unittests/BuddyAllocatorTests.cpp",
"src/tests/unittests/BuddyMemoryAllocatorTests.cpp",
@@ -781,14 +844,17 @@ test("dawn_unittests") {
"src/tests/unittests/ErrorTests.cpp",
"src/tests/unittests/ExtensionTests.cpp",
"src/tests/unittests/GetProcAddressTests.cpp",
+ "src/tests/unittests/LinkedListTests.cpp",
"src/tests/unittests/MathTests.cpp",
"src/tests/unittests/ObjectBaseTests.cpp",
"src/tests/unittests/PerStageTests.cpp",
+ "src/tests/unittests/PlacementAllocatedTests.cpp",
"src/tests/unittests/RefCountedTests.cpp",
"src/tests/unittests/ResultTests.cpp",
"src/tests/unittests/RingBufferAllocatorTests.cpp",
"src/tests/unittests/SerialMapTests.cpp",
"src/tests/unittests/SerialQueueTests.cpp",
+ "src/tests/unittests/SlabAllocatorTests.cpp",
"src/tests/unittests/SystemUtilsTests.cpp",
"src/tests/unittests/ToBackendTests.cpp",
"src/tests/unittests/validation/BindGroupValidationTests.cpp",
@@ -809,8 +875,10 @@ test("dawn_unittests") {
"src/tests/unittests/validation/RenderPassDescriptorValidationTests.cpp",
"src/tests/unittests/validation/RenderPassValidationTests.cpp",
"src/tests/unittests/validation/RenderPipelineValidationTests.cpp",
+ "src/tests/unittests/validation/ResourceUsageTrackingTests.cpp",
"src/tests/unittests/validation/SamplerValidationTests.cpp",
"src/tests/unittests/validation/ShaderModuleValidationTests.cpp",
+ "src/tests/unittests/validation/StorageTextureValidationTests.cpp",
"src/tests/unittests/validation/TextureValidationTests.cpp",
"src/tests/unittests/validation/TextureViewValidationTests.cpp",
"src/tests/unittests/validation/ToggleValidationTests.cpp",
@@ -875,6 +943,7 @@ source_set("dawn_end2end_tests_sources") {
"src/tests/end2end/DebugMarkerTests.cpp",
"src/tests/end2end/DepthStencilStateTests.cpp",
"src/tests/end2end/DestroyTests.cpp",
+ "src/tests/end2end/DeviceLostTests.cpp",
"src/tests/end2end/DrawIndexedIndirectTests.cpp",
"src/tests/end2end/DrawIndexedTests.cpp",
"src/tests/end2end/DrawIndirectTests.cpp",
@@ -893,6 +962,7 @@ source_set("dawn_end2end_tests_sources") {
"src/tests/end2end/RenderPassTests.cpp",
"src/tests/end2end/SamplerTests.cpp",
"src/tests/end2end/ScissorTests.cpp",
+ "src/tests/end2end/StorageTextureTests.cpp",
"src/tests/end2end/TextureFormatTests.cpp",
"src/tests/end2end/TextureViewTests.cpp",
"src/tests/end2end/TextureZeroInitTests.cpp",
@@ -902,13 +972,13 @@ source_set("dawn_end2end_tests_sources") {
"src/tests/end2end/ViewportTests.cpp",
]
- libs = []
-
- if (dawn_enable_metal) {
- sources += [ "src/tests/end2end/IOSurfaceWrappingTests.cpp" ]
+ # Validation tests that need OS windows live in end2end tests.
+ sources += [
+ "src/tests/unittests/validation/ValidationTest.cpp",
+ "src/tests/unittests/validation/ValidationTest.h",
+ ]
- libs += [ "IOSurface.framework" ]
- }
+ libs = []
if (dawn_enable_d3d12) {
sources += [ "src/tests/end2end/D3D12ResourceWrappingTests.cpp" ]
@@ -918,19 +988,34 @@ source_set("dawn_end2end_tests_sources") {
]
}
+ if (dawn_enable_metal) {
+ sources += [ "src/tests/end2end/IOSurfaceWrappingTests.cpp" ]
+ libs += [ "IOSurface.framework" ]
+ }
+
if (dawn_enable_opengl) {
+ assert(supports_glfw_for_windowing)
+ }
+
+ if (supports_glfw_for_windowing) {
+ sources += [
+ "src/tests/end2end/SwapChainTests.cpp",
+ "src/tests/end2end/SwapChainValidationTests.cpp",
+ "src/tests/end2end/WindowSurfaceTests.cpp",
+ ]
deps += [ ":dawn_glfw" ]
}
}
source_set("dawn_white_box_tests_sources") {
- configs += [ "${dawn_root}/src/common:dawn_internal" ]
+ configs += [ ":libdawn_native_internal" ]
testonly = true
deps = [
":dawn_utils",
- ":libdawn_native",
":libdawn_native_sources",
+ ":libdawn_native_static", # Static because the tests both link against and have libdawn_native
+ # sources. MSVC errors when both importing and exporting symbols.
":libdawn_wire",
"${dawn_root}/src/common",
"${dawn_root}/src/dawn:dawncpp",
@@ -945,13 +1030,26 @@ source_set("dawn_white_box_tests_sources") {
if (dawn_enable_vulkan) {
deps += [ "third_party:vulkan_headers" ]
- if (is_linux) {
- sources += [ "src/tests/white_box/VulkanImageWrappingTests.cpp" ]
+ if (is_chromeos) {
+ sources += [ "src/tests/white_box/VulkanImageWrappingTestsDmaBuf.cpp" ]
+ } else if (is_linux) {
+ sources += [ "src/tests/white_box/VulkanImageWrappingTestsOpaqueFD.cpp" ]
+ }
+
+ if (dawn_enable_error_injection) {
+ sources += [ "src/tests/white_box/VulkanErrorInjectorTests.cpp" ]
}
}
if (dawn_enable_d3d12) {
- sources += [ "src/tests/white_box/D3D12SmallTextureTests.cpp" ]
+ sources += [
+ "src/tests/white_box/D3D12DescriptorHeapTests.cpp",
+ "src/tests/white_box/D3D12SmallTextureTests.cpp",
+ ]
+ }
+
+ if (dawn_enable_metal) {
+ sources += [ "src/tests/white_box/MetalAutoreleasePoolTests.mm" ]
}
if (dawn_enable_opengl) {
@@ -994,6 +1092,10 @@ test("dawn_end2end_tests") {
if (dawn_enable_opengl) {
deps += [ ":dawn_glfw" ]
}
+
+ if (is_chromeos) {
+ libs += [ "gbm" ]
+ }
}
test("dawn_perf_tests") {
@@ -1004,7 +1106,6 @@ test("dawn_perf_tests") {
":dawn_utils",
":libdawn_native",
":libdawn_wire",
- "${dawn_jsoncpp_dir}:jsoncpp",
"${dawn_root}/src/common",
"${dawn_root}/src/dawn:dawncpp",
"${dawn_root}/src/dawn:libdawn_proc",
@@ -1193,6 +1294,12 @@ if (dawn_standalone) {
]
}
+ dawn_sample("ManualSwapChainTest") {
+ sources = [
+ "examples/ManualSwapChainTest.cpp",
+ ]
+ }
+
group("dawn_samples") {
deps = [
":Animometer",
@@ -1214,6 +1321,9 @@ group("dawn_fuzzers") {
"src/fuzzers:dawn_spirv_cross_glsl_fast_fuzzer",
"src/fuzzers:dawn_spirv_cross_hlsl_fast_fuzzer",
"src/fuzzers:dawn_spirv_cross_msl_fast_fuzzer",
+ "src/fuzzers:dawn_spvc_glsl_fast_fuzzer",
+ "src/fuzzers:dawn_spvc_hlsl_fast_fuzzer",
+ "src/fuzzers:dawn_spvc_msl_fast_fuzzer",
"src/fuzzers:dawn_wire_server_and_frontend_fuzzer",
]
}
diff --git a/chromium/third_party/dawn/CMakeLists.txt b/chromium/third_party/dawn/CMakeLists.txt
new file mode 100644
index 00000000000..0ab4e4e8baf
--- /dev/null
+++ b/chromium/third_party/dawn/CMakeLists.txt
@@ -0,0 +1,149 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cmake_minimum_required(VERSION 3.10)
+
+# When upgrading to CMake 3.11 we can remove DAWN_DUMMY_FILE because source-less add_library
+# becomes available.
+# When upgrading to CMake 3.12 we should add CONFIGURE_DEPENDS to DawnGenerator to rerun CMake in
+# case any of the generator files changes.
+
+project(
+ Dawn
+ DESCRIPTION "Dawn, a WebGPU implementation"
+ HOMEPAGE_URL "https://dawn.googlesource.com/dawn"
+ LANGUAGES C CXX
+)
+
+set_property(GLOBAL PROPERTY USE_FOLDERS ON)
+
+if(NOT CMAKE_BUILD_TYPE)
+ message(WARNING "CMAKE_BUILD_TYPE not set, forcing it to Debug")
+ set(CMAKE_BUILD_TYPE "Debug" CACHE STRING
+ "Build type (Debug, Release, RelWithDebInfo, MinSizeRel)" FORCE)
+endif()
+
+set(DAWN_BUILD_GEN_DIR "${Dawn_BINARY_DIR}/gen")
+set(DAWN_GENERATOR_DIR "${Dawn_SOURCE_DIR}/generator")
+set(DAWN_SRC_DIR "${Dawn_SOURCE_DIR}/src")
+set(DAWN_INCLUDE_DIR "${DAWN_SRC_DIR}/include")
+set(DAWN_TEMPLATE_DIR "${DAWN_GENERATOR_DIR}/templates")
+set(DAWN_THIRD_PARTY_DIR "${Dawn_SOURCE_DIR}/third_party")
+
+set(DAWN_DUMMY_FILE "${DAWN_SRC_DIR}/Dummy.cpp")
+
+################################################################################
+# Configuration options
+################################################################################
+
+# Default values for the backend-enabling options
+set(ENABLE_D3D12 OFF)
+set(ENABLE_METAL OFF)
+set(ENABLE_OPENGL OFF)
+set(ENABLE_VULKAN OFF)
+set(USE_X11 OFF)
+if (WIN32)
+ set(ENABLE_D3D12 ON)
+ set(ENABLE_VULKAN ON)
+elseif(APPLE)
+ set(ENABLE_METAL ON)
+elseif(UNIX)
+ set(ENABLE_OPENGL ON)
+ set(ENABLE_VULKAN ON)
+ set(USE_X11 ON)
+endif()
+
+option(DAWN_ENABLE_D3D12 "Enable compilation of the D3D12 backend" ${ENABLE_D3D12})
+option(DAWN_ENABLE_METAL "Enable compilation of the Metal backend" ${ENABLE_METAL})
+option(DAWN_ENABLE_NULL "Enable compilation of the Null backend" ON)
+option(DAWN_ENABLE_OPENGL "Enable compilation of the OpenGL backend" ${ENABLE_OPENGL})
+option(DAWN_ENABLE_VULKAN "Enable compilation of the Vulkan backend" ${ENABLE_VULKAN})
+option(DAWN_ALWAYS_ASSERT "Enable assertions on all build types" OFF)
+option(DAWN_USE_X11 "Enable support for X11 surface" ${USE_X11})
+
+option(DAWN_BUILD_EXAMPLES "Enables building Dawn's examples" ON)
+
+set(DAWN_GLFW_DIR "${DAWN_THIRD_PARTY_DIR}/glfw" CACHE STRING "Directory in which to find GLFW")
+set(DAWN_GLM_DIR "${DAWN_THIRD_PARTY_DIR}/glm" CACHE STRING "Directory in which to find GLM")
+set(DAWN_GLSLANG_DIR "${DAWN_THIRD_PARTY_DIR}/glslang" CACHE STRING "Directory in which to find GLSLang")
+set(DAWN_JINJA2_DIR "${DAWN_THIRD_PARTY_DIR}/jinja2" CACHE STRING "Directory in which to find Jinja2")
+set(DAWN_SHADERC_DIR "${DAWN_THIRD_PARTY_DIR}/shaderc" CACHE STRING "Directory in which to find shaderc")
+set(DAWN_SPIRV_CROSS_DIR "${DAWN_THIRD_PARTY_DIR}/spirv-cross" CACHE STRING "Directory in which to find SPIRV-Cross")
+set(DAWN_SPIRV_HEADERS_DIR "${DAWN_THIRD_PARTY_DIR}/spirv-headers" CACHE STRING "Directory in which to find SPIRV-Headers")
+set(DAWN_SPIRV_TOOLS_DIR "${DAWN_THIRD_PARTY_DIR}/SPIRV-Tools" CACHE STRING "Directory in which to find SPIRV-Tools")
+
+################################################################################
+# Dawn's public and internal "configs"
+################################################################################
+
+# The public config contains only the include paths for the Dawn headers.
+add_library(dawn_public_config INTERFACE)
+target_include_directories(dawn_public_config INTERFACE
+ "${DAWN_SRC_DIR}/include"
+ "${DAWN_BUILD_GEN_DIR}/src/include"
+)
+
+# The internal config contains additional paths but includes the dawn_public_config include paths
+add_library(dawn_internal_config INTERFACE)
+target_include_directories(dawn_internal_config INTERFACE
+ "${DAWN_SRC_DIR}"
+ "${DAWN_BUILD_GEN_DIR}/src"
+)
+target_link_libraries(dawn_internal_config INTERFACE dawn_public_config)
+
+# Compile definitions for the internal config
+if (DAWN_ALWAYS_ASSERT OR $<CONFIG:Debug>)
+ target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_ASSERTS")
+endif()
+if (DAWN_ENABLE_D3D12)
+ target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_BACKEND_D3D12")
+endif()
+if (DAWN_ENABLE_METAL)
+ target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_BACKEND_METAL")
+endif()
+if (DAWN_ENABLE_NULL)
+ target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_BACKEND_NULL")
+endif()
+if (DAWN_ENABLE_OPENGL)
+ target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_BACKEND_OPENGL")
+endif()
+if (DAWN_ENABLE_VULKAN)
+ target_compile_definitions(dawn_internal_config INTERFACE "DAWN_ENABLE_BACKEND_VULKAN")
+endif()
+if (DAWN_USE_X11)
+ target_compile_definitions(dawn_internal_config INTERFACE "DAWN_USE_X11")
+endif()
+if (WIN32)
+ target_compile_definitions(dawn_internal_config INTERFACE "NOMINMAX" "WIN32_LEAN_AND_MEAN")
+endif()
+
+
+set(CMAKE_CXX_STANDARD "14")
+
+################################################################################
+# Run on all subdirectories
+################################################################################
+
+add_subdirectory(third_party)
+add_subdirectory(src/common)
+add_subdirectory(generator)
+add_subdirectory(src/dawn)
+add_subdirectory(src/dawn_platform)
+add_subdirectory(src/dawn_native)
+add_subdirectory(src/dawn_wire)
+
+if (DAWN_BUILD_EXAMPLES)
+ add_subdirectory(src/utils)
+ add_subdirectory(examples)
+endif()
diff --git a/chromium/third_party/dawn/DEPS b/chromium/third_party/dawn/DEPS
index 432cae95f69..f596feb8c86 100644
--- a/chromium/third_party/dawn/DEPS
+++ b/chromium/third_party/dawn/DEPS
@@ -51,42 +51,31 @@ deps = {
# SPIRV-Cross
'third_party/spirv-cross': {
- 'url': '{chromium_git}/external/github.com/KhronosGroup/SPIRV-Cross@fd5aa3ad51ece55a1b51fe6bfb271db6844ae291',
+ 'url': '{chromium_git}/external/github.com/KhronosGroup/SPIRV-Cross@9b3c5e12be12c55533f3bd3ab9cc617ec0f393d8',
'condition': 'dawn_standalone',
},
# SPIRV compiler dependencies: SPIRV-Tools, SPIRV-headers, glslang and shaderc
'third_party/SPIRV-Tools': {
- 'url': '{chromium_git}/external/github.com/KhronosGroup/SPIRV-Tools@85f3e93d13f32d45bd7f9999aa51baddf2452aae',
+ 'url': '{chromium_git}/external/github.com/KhronosGroup/SPIRV-Tools@fd773eb50d628c1981338addc093df879757c2cf',
'condition': 'dawn_standalone',
},
'third_party/spirv-headers': {
- 'url': '{chromium_git}/external/github.com/KhronosGroup/SPIRV-Headers@204cd131c42b90d129073719f2766293ce35c081',
+ 'url': '{chromium_git}/external/github.com/KhronosGroup/SPIRV-Headers@f8bf11a0253a32375c32cad92c841237b96696c0',
'condition': 'dawn_standalone',
},
'third_party/glslang': {
- 'url': '{chromium_git}/external/github.com/KhronosGroup/glslang@38b4db48f98c4e3a9cc405de3a76547b857e1c37',
+ 'url': '{chromium_git}/external/github.com/KhronosGroup/glslang@08c02ced798afe357349d0e422cd474aa1eb0c79',
'condition': 'dawn_standalone',
},
'third_party/shaderc': {
- 'url': '{chromium_git}/external/github.com/google/shaderc@efedd6739684bb2d4183b45af111b4942b465e5b',
- 'condition': 'dawn_standalone',
- },
-
- # jsoncpp for perf tests trace events
- 'third_party/jsoncpp': {
- 'url': '{chromium_git}/chromium/src/third_party/jsoncpp@571788934b5ee8643d53e5d054534abbe6006168',
- 'condition': 'dawn_standalone',
- },
-
- 'third_party/jsoncpp/source': {
- 'url' : '{chromium_git}/external/github.com/open-source-parsers/jsoncpp@645250b6690785be60ab6780ce4b58698d884d11',
+ 'url': '{chromium_git}/external/github.com/google/shaderc@f085b9745fc1b8471f42aa2f8c54f3c73878ef07',
'condition': 'dawn_standalone',
},
# GLFW for tests and samples
'third_party/glfw': {
- 'url': '{chromium_git}/external/github.com/glfw/glfw@2de2589f910b1a85905f425be4d32f33cec092df',
+ 'url': '{chromium_git}/external/github.com/glfw/glfw@d973acc123826666ecc9e6fd475682e3d84c54a6',
'condition': 'dawn_standalone',
},
@@ -104,18 +93,18 @@ deps = {
# Khronos Vulkan-Headers
'third_party/vulkan-headers': {
- 'url': '{chromium_git}/external/github.com/KhronosGroup/Vulkan-Headers@5b44df19e040fca0048ab30c553a8c2d2cb9623e',
+ 'url': '{chromium_git}/external/github.com/KhronosGroup/Vulkan-Headers@d287523f48dba1b669866c5d6625b29931948e39',
'condition': 'dawn_standalone',
},
# Khronos Vulkan-ValidationLayers
'third_party/vulkan-validation-layers': {
- 'url': '{chromium_git}/external/github.com/KhronosGroup/Vulkan-ValidationLayers@9fba37afae13a11bd49ae942bf82e5bf1098e381',
+ 'url': '{chromium_git}/external/github.com/KhronosGroup/Vulkan-ValidationLayers@237d818e81fbffa073d29d94f53a2cbac4f25b9f',
'condition': 'dawn_standalone',
},
'third_party/swiftshader': {
- 'url': '{swiftshader_git}/SwiftShader@e7ce4e53915d026720005ca2c1831a5c28f77b3f',
+ 'url': '{swiftshader_git}/SwiftShader@51b2800bb317d9ab6026e6123c62f013dd5cf5e4',
'condition': 'dawn_standalone',
},
diff --git a/chromium/third_party/dawn/OWNERS b/chromium/third_party/dawn/OWNERS
index 06bf5663c5d..cde87a48d89 100644
--- a/chromium/third_party/dawn/OWNERS
+++ b/chromium/third_party/dawn/OWNERS
@@ -1,4 +1,5 @@
cwallez@chromium.org
kainino@chromium.org
+enga@chromium.org
# COMPONENT: Internals>GPU>Dawn
diff --git a/chromium/third_party/dawn/build_overrides/build.gni b/chromium/third_party/dawn/build_overrides/build.gni
index 1acd4a16562..9bb87130c41 100644
--- a/chromium/third_party/dawn/build_overrides/build.gni
+++ b/chromium/third_party/dawn/build_overrides/build.gni
@@ -12,16 +12,35 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Tell Dawn and dependencies to not do Chromium-specific things
-build_with_chromium = false
+declare_args() {
+ # Tell Dawn and dependencies to not do Chromium-specific things
+ build_with_chromium = false
-# Use Chromium's binutils to have "hermetic" builds on bots
-linux_use_bundled_binutils_override = true
+ # Use Chromium's binutils to have "hermetic" builds on bots
+ linux_use_bundled_binutils_override = true
-# In standalone Dawn builds, don't try to use the hermetic install of Xcode
-# that Chromium uses
-use_system_xcode = true
+ # In standalone Dawn builds, don't try to use the hermetic install of Xcode
+ # that Chromium uses
+ use_system_xcode = ""
-# Android 32-bit non-component, non-clang builds cannot have symbol_level=2
-# due to 4GiB file size limit, see https://crbug.com/648948.
-ignore_elf32_limitations = false
+ # Android 32-bit non-component, non-clang builds cannot have symbol_level=2
+ # due to 4GiB file size limit, see https://crbug.com/648948.
+ ignore_elf32_limitations = false
+}
+
+# Detect whether we can use the hermetic XCode like in Chromium and do so if
+# possible.
+if (host_os == "mac" && use_system_xcode == "") {
+ _result = exec_script("//build/mac/should_use_hermetic_xcode.py",
+ [ target_os ],
+ "value")
+
+ assert(_result != 2,
+ "Do not allow building targets with the default" +
+ "hermetic toolchain if the minimum OS version is not met.")
+ assert(_result != 3,
+ "iOS does not support building with a hermetic toolchain. " +
+ "Please install Xcode.")
+
+ use_system_xcode = _result != 1
+}
diff --git a/chromium/third_party/dawn/build_overrides/dawn.gni b/chromium/third_party/dawn/build_overrides/dawn.gni
index 0d44f3ba3b6..0378ec1f974 100644
--- a/chromium/third_party/dawn/build_overrides/dawn.gni
+++ b/chromium/third_party/dawn/build_overrides/dawn.gni
@@ -27,7 +27,6 @@ dawn_standalone = true
dawn_jinja2_dir = "//third_party/jinja2"
dawn_glfw_dir = "//third_party/glfw"
dawn_googletest_dir = "//third_party/googletest"
-dawn_jsoncpp_dir = "//third_party/jsoncpp"
dawn_shaderc_dir = "//third_party/shaderc"
dawn_spirv_tools_dir = "//third_party/SPIRV-Tools"
dawn_spirv_cross_dir = "//third_party/spirv-cross"
diff --git a/chromium/third_party/dawn/dawn.json b/chromium/third_party/dawn/dawn.json
index 44fce28e624..bfb03aa0f7a 100644
--- a/chromium/third_party/dawn/dawn.json
+++ b/chromium/third_party/dawn/dawn.json
@@ -14,6 +14,27 @@
"See the License for the specific language governing permissions and",
"limitations under the License."
],
+ "adapter properties": {
+ "category": "structure",
+ "extensible": true,
+ "members": [
+ {"name": "device ID", "type": "uint32_t"},
+ {"name": "vendor ID", "type": "uint32_t"},
+ {"name": "name", "type": "char", "annotation": "const*"},
+ {"name": "adapter type", "type": "adapter type"},
+ {"name": "backend type", "type": "backend type"}
+ ]
+ },
+ "adapter type": {
+ "category": "enum",
+ "javascript": false,
+ "values": [
+ {"value": 0, "name": "discrete GPU"},
+ {"value": 1, "name": "integrated GPU"},
+ {"value": 2, "name": "CPU"},
+ {"value": 3, "name": "unknown"}
+ ]
+ },
"address mode": {
"category": "enum",
"values": [
@@ -22,6 +43,19 @@
{"value": 2, "name": "clamp to edge"}
]
},
+ "backend type": {
+ "category": "enum",
+ "javascript": false,
+ "values": [
+ {"value": 0, "name": "null"},
+ {"value": 1, "name": "D3D11"},
+ {"value": 2, "name": "D3D12"},
+ {"value": 3, "name": "metal"},
+ {"value": 4, "name": "vulkan"},
+ {"value": 5, "name": "openGL"},
+ {"value": 6, "name": "openGLES"}
+ ]
+ },
"bind group": {
"category": "object"
},
@@ -60,7 +94,8 @@
{"name": "has dynamic offset", "type": "bool", "default": "false"},
{"name": "multisampled", "type": "bool", "default": "false"},
{"name": "texture dimension", "type": "texture view dimension", "default": "undefined"},
- {"name": "texture component type", "type": "texture component type", "default": "float"}
+ {"name": "texture component type", "type": "texture component type", "default": "float"},
+ {"name": "storage texture format", "type": "texture format", "default": "undefined"}
]
},
"bind group layout descriptor": {
@@ -80,7 +115,9 @@
{"value": 2, "name": "readonly storage buffer"},
{"value": 3, "name": "sampler"},
{"value": 4, "name": "sampled texture"},
- {"value": 5, "name": "storage texture"}
+ {"value": 5, "name": "storage texture"},
+ {"value": 6, "name": "readonly storage texture"},
+ {"value": 7, "name": "writeonly storage texture"}
]
},
"blend descriptor": {
@@ -443,7 +480,7 @@
"name": "get bind group layout",
"returns": "bind group layout",
"args": [
- {"name": "group", "type": "uint32_t"}
+ {"name": "group index", "type": "uint32_t"}
]
}
]
@@ -561,6 +598,7 @@
"name": "create swap chain",
"returns": "swap chain",
"args": [
+ {"name": "surface", "type": "surface", "optional": "true"},
{"name": "descriptor", "type": "swap chain descriptor", "annotation": "const*"}
]
},
@@ -580,6 +618,9 @@
"TODO": "enga@: Make this a Dawn extension"
},
{
+ "name": "lose for testing"
+ },
+ {
"name": "tick"
},
{
@@ -590,6 +631,13 @@
]
},
{
+ "name": "set device lost callback",
+ "args": [
+ {"name": "callback", "type": "device lost callback"},
+ {"name": "userdata", "type": "void", "annotation": "*"}
+ ]
+ },
+ {
"name": "push error scope",
"args": [
{"name": "filter", "type": "error filter"}
@@ -605,6 +653,13 @@
}
]
},
+ "device lost callback": {
+ "category": "callback",
+ "args": [
+ {"name": "message", "type": "char", "annotation": "const*"},
+ {"name": "userdata", "type": "void", "annotation": "*"}
+ ]
+ },
"device properties": {
"category": "structure",
"extensible": false,
@@ -724,6 +779,23 @@
{"value": 1, "name": "uint32"}
]
},
+ "instance": {
+ "category": "object",
+ "methods": [
+ {
+ "name": "create surface",
+ "returns": "surface",
+ "args": [
+ {"name": "descriptor", "type": "surface descriptor", "annotation": "const*"}
+ ]
+ }
+ ]
+ },
+ "instance descriptor": {
+ "category": "structure",
+ "extensible": true,
+ "members": []
+ },
"vertex attribute descriptor": {
"category": "structure",
"extensible": false,
@@ -793,6 +865,14 @@
{"name": "bind group layouts", "type": "bind group layout", "annotation": "const*", "length": "bind group layout count"}
]
},
+ "present mode": {
+ "category": "enum",
+ "values": [
+ {"value": 0, "name": "immediate"},
+ {"value": 1, "name": "mailbox"},
+ {"value": 2, "name": "fifo"}
+ ]
+ },
"programmable stage descriptor": {
"category": "structure",
"extensible": true,
@@ -876,19 +956,19 @@
"name": "draw",
"args": [
{"name": "vertex count", "type": "uint32_t"},
- {"name": "instance count", "type": "uint32_t"},
- {"name": "first vertex", "type": "uint32_t"},
- {"name": "first instance", "type": "uint32_t"}
+ {"name": "instance count", "type": "uint32_t", "default": "1"},
+ {"name": "first vertex", "type": "uint32_t", "default": "0"},
+ {"name": "first instance", "type": "uint32_t", "default": "0"}
]
},
{
"name": "draw indexed",
"args": [
{"name": "index count", "type": "uint32_t"},
- {"name": "instance count", "type": "uint32_t"},
- {"name": "first index", "type": "uint32_t"},
- {"name": "base vertex", "type": "int32_t"},
- {"name": "first instance", "type": "uint32_t"}
+ {"name": "instance count", "type": "uint32_t", "default": "1"},
+ {"name": "first index", "type": "uint32_t", "default": "0"},
+ {"name": "base vertex", "type": "int32_t", "default": "0"},
+ {"name": "first instance", "type": "uint32_t", "default": "0"}
]
},
{
@@ -1022,19 +1102,19 @@
"name": "draw",
"args": [
{"name": "vertex count", "type": "uint32_t"},
- {"name": "instance count", "type": "uint32_t"},
- {"name": "first vertex", "type": "uint32_t"},
- {"name": "first instance", "type": "uint32_t"}
+ {"name": "instance count", "type": "uint32_t", "default": "1"},
+ {"name": "first vertex", "type": "uint32_t", "default": "0"},
+ {"name": "first instance", "type": "uint32_t", "default": "0"}
]
},
{
"name": "draw indexed",
"args": [
{"name": "index count", "type": "uint32_t"},
- {"name": "instance count", "type": "uint32_t"},
- {"name": "first index", "type": "uint32_t"},
- {"name": "base vertex", "type": "int32_t"},
- {"name": "first instance", "type": "uint32_t"}
+ {"name": "instance count", "type": "uint32_t", "default": "1"},
+ {"name": "first index", "type": "uint32_t", "default": "0"},
+ {"name": "base vertex", "type": "int32_t", "default": "0"},
+ {"name": "first instance", "type": "uint32_t", "default": "0"}
]
},
{
@@ -1133,7 +1213,7 @@
"name": "get bind group layout",
"returns": "bind group layout",
"args": [
- {"name": "group", "type": "uint32_t"}
+ {"name": "group index", "type": "uint32_t"}
]
}
]
@@ -1220,6 +1300,49 @@
{"name": "pass op", "type": "stencil operation", "default": "keep"}
]
},
+ "surface": {
+ "category": "object"
+ },
+ "surface descriptor": {
+ "category": "structure",
+ "extensible": true,
+ "members": [
+ {"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true}
+ ]
+ },
+ "surface descriptor from HTML canvas id": {
+ "category": "structure",
+ "chained": true,
+ "members": [
+ {"name": "id", "type": "char", "annotation": "const*", "length": "strlen"}
+ ]
+ },
+ "surface descriptor from metal layer": {
+ "category": "structure",
+ "chained": true,
+ "javascript": false,
+ "members": [
+ {"name": "layer", "type": "void", "annotation": "*"}
+ ]
+ },
+ "surface descriptor from windows HWND": {
+ "category": "structure",
+ "chained": true,
+ "javascript": false,
+ "members": [
+ {"name": "hinstance", "type": "void", "annotation": "*"},
+ {"name": "hwnd", "type": "void", "annotation": "*"}
+ ]
+ },
+ "surface descriptor from xlib": {
+ "category": "structure",
+ "chained": true,
+ "javascript": false,
+ "members": [
+ {"name": "display", "type": "void", "annotation": "*"},
+ {"name": "window", "type": "uint32_t"}
+ ]
+ },
"swap chain": {
"category": "object",
"methods": [
@@ -1241,7 +1364,23 @@
"extensible": true,
"members": [
{"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
- {"name": "implementation", "type": "uint64_t"}
+ {"name": "usage", "type": "texture usage"},
+ {"name": "format", "type": "texture format"},
+ {"name": "width", "type": "uint32_t"},
+ {"name": "height", "type": "uint32_t"},
+ {"name": "present mode", "type": "present mode"},
+ {"name": "implementation", "type": "uint64_t", "default": 0}
+ ]
+ },
+ "s type": {
+ "category": "enum",
+ "javascript": false,
+ "values": [
+ {"value": 0, "name": "invalid"},
+ {"value": 1, "name": "surface descriptor from metal layer"},
+ {"value": 2, "name": "surface descriptor from windows HWND"},
+ {"value": 3, "name": "surface descriptor from xlib"},
+ {"value": 4, "name": "surface descriptor from HTML canvas id"}
]
},
"texture": {
@@ -1310,7 +1449,7 @@
"texture format": {
"category": "enum",
"values": [
- {"value": 0, "name": "undefined", "valid": false},
+ {"value": 0, "name": "undefined", "valid": false, "jsrepr": "undefined"},
{"value": 1, "name": "R8 unorm"},
{"value": 2, "name": "R8 snorm"},
{"value": 3, "name": "R8 uint"},
@@ -1406,7 +1545,7 @@
"texture view dimension": {
"category": "enum",
"values": [
- {"value": 0, "name": "undefined", "valid": false},
+ {"value": 0, "name": "undefined", "valid": false, "jsrepr": "undefined"},
{"value": 1, "name": "1D"},
{"value": 2, "name": "2D"},
{"value": 3, "name": "2D array"},
diff --git a/chromium/third_party/dawn/dawn_wire.json b/chromium/third_party/dawn/dawn_wire.json
index cad1d41b536..6493349fd8a 100644
--- a/chromium/third_party/dawn/dawn_wire.json
+++ b/chromium/third_party/dawn/dawn_wire.json
@@ -74,6 +74,9 @@
{ "name": "type", "type": "error type"},
{ "name": "message", "type": "char", "annotation": "const*", "length": "strlen" }
],
+ "device lost callback" : [
+ { "name": "message", "type": "char", "annotation": "const*", "length": "strlen" }
+ ],
"device pop error scope callback": [
{ "name": "request serial", "type": "uint64_t" },
{ "name": "type", "type": "error type" },
@@ -86,7 +89,10 @@
},
"special items": {
"client_side_structures": [
- "CreateBufferMappedResult"
+ "CreateBufferMappedResult",
+ "SurfaceDescriptorFromMetalLayer",
+ "SurfaceDescriptorFromWindowsHWND",
+ "SurfaceDescriptorFromXlib"
],
"client_side_commands": [
"BufferMapReadAsync",
@@ -94,6 +100,7 @@
"BufferSetSubData",
"DeviceCreateBufferMappedAsync",
"DevicePopErrorScope",
+ "DeviceSetDeviceLostCallback",
"DeviceSetUncapturedErrorCallback",
"FenceGetCompletedValue",
"FenceOnCompletion"
diff --git a/chromium/third_party/dawn/docs/fuzzing.md b/chromium/third_party/dawn/docs/fuzzing.md
new file mode 100644
index 00000000000..8c7b7baffa7
--- /dev/null
+++ b/chromium/third_party/dawn/docs/fuzzing.md
@@ -0,0 +1,26 @@
+# Fuzzing Dawn
+
+## `dawn_wire_server_and_frontend_fuzzer`
+
+The `dawn_wire_server_and_frontend_fuzzer` sets up Dawn using the Null backend, and passes inputs to the wire server. This fuzzes the `dawn_wire` deserialization, as well as Dawn's frontend validation.
+
+## `dawn_wire_server_and_vulkan_backend_fuzzer`
+
+The `dawn_wire_server_and_vulkan_backend_fuzzer` is like `dawn_wire_server_and_frontend_fuzzer` but it runs using a Vulkan CPU backend such as Swiftshader. This fuzzer supports error injection by using the first bytes of the fuzzing input as a Vulkan call index for which to mock a failure.
+
+## Updating the Seed Corpus
+
+Using a seed corpus significantly improves the efficiency of fuzzing. Dawn's fuzzers use interesting testcases discovered in previous fuzzing runs to seed future runs. Fuzzing can be further improved by using Dawn tests as an example of API usage which allows the fuzzer to quickly discover and use new API entrypoints and usage patterns.
+
+The script [update_fuzzer_seed_corpus.sh](../scripts/update_fuzzer_seed_corpus.sh) can be used to capture a trace while running Dawn tests, and upload it to the existing fuzzer seed corpus. It does the following steps:
+1. Builds the provided test and fuzzer targets.
+2. Runs the provided test target with `--use-wire --wire-trace-dir=tmp_dir1 [additional_test_args]` to dump traces of the tests.
+3. Generates one variant of each trace for every possible error index, by running the fuzzer target with `--injected-error-testcase-dir=tmp_dir2 ...`.
+4. Minimizes all testcases by running the fuzzer target with `-merge=1 tmp_dir3 tmp_dir1 tmp_dir2`.
+
+To run the script:
+1. You must be in a Chromium checkout using the GN arg `use_libfuzzer=true`
+2. Run `./third_party/dawn/scripts/update_fuzzer_seed_corpus.sh <out_dir> <fuzzer> <test> [additional_test_args]`.
+
+ Example: `./third_party/dawn/scripts/update_fuzzer_seed_corpus.sh out/fuzz dawn_wire_server_and_vulkan_backend_fuzzer dawn_end2end_tests --gtest_filter=*Vulkan`
+3. The script will print instructions for testing, and then uploading new inputs. Please, only upload inputs after testing the fuzzer with new inputs, and verifying there is a meaningful change in coverage. Uploading requires [gcloud](https://g3doc.corp.google.com/cloud/sdk/g3doc/index.md?cl=head) to be logged in with @google.com credentials: `gcloud auth login`.
diff --git a/chromium/third_party/dawn/docs/infra.md b/chromium/third_party/dawn/docs/infra.md
index 8870a38c19d..605d9cad187 100644
--- a/chromium/third_party/dawn/docs/infra.md
+++ b/chromium/third_party/dawn/docs/infra.md
@@ -1,3 +1,92 @@
-# Dawn's contiuous testing infrastructure
+# Dawn's Continuous Testing Infrastructure
-(TODO)
+Dawn uses Chromium's continuous integration (CI) infrastructure to continually run tests on changes to Dawn and provide a way for developers to run tests against their changes before submitting. CI bots continually build and run tests for every new change, and Try bots build and run developers' pending changes before submission. Dawn uses two different build recipes. There is a Dawn build recipe which checks out Dawn standalone, compiles, and runs the `dawn_unittests`. And, there is the Chromium build recipe which checks out Dawn inside a Chromium checkout. Inside a Chromium checkout, there is more infrastructure available for triggering `dawn_end2end_tests` that run on real GPU hardware, and we are able to run Chromium integration tests as well as tests for WebGPU.
+
+ - [Dawn CI Builders](https://ci.chromium.org/p/dawn/g/ci/builders)
+ - [Dawn Try Builders](https://ci.chromium.org/p/dawn/g/try/builders)
+ - [chromium.dawn Waterfall](https://ci.chromium.org/p/chromium/g/chromium.dawn/console)
+
+For additional information on GPU testing in Chromium, please see [[chromium/src]//docs/gpu/gpu_testing_bot_details.md](https://chromium.googlesource.com/chromium/src.git/+/master/docs/gpu/gpu_testing_bot_details.md).
+
+## Dawn CI/Try Builders
+Dawn builders are specified in [[dawn]//infra/config/global/cr-buildbucket.cfg](../infra/config/global/cr-buildbucket.cfg). This file contains a few mixins such as `clang`, `no_clang`, `x64`, `x86`, `debug`, `release` which are used to specify the bot dimensions and build properties (builder_mixins.recipe.properties). At the time of writing, we have the following builders:
+ - [dawn/try/presubmit](https://ci.chromium.org/p/dawn/builders/try/presubmit)
+ - [dawn/try/linux-clang-dbg-x64](https://ci.chromium.org/p/dawn/builders/try/linux-clang-dbg-x64)
+ - [dawn/try/linux-clang-dbg-x86](https://ci.chromium.org/p/dawn/builders/try/linux-clang-dbg-x86)
+ - [dawn/try/linux-clang-rel-x64](https://ci.chromium.org/p/dawn/builders/try/linux-clang-rel-x64)
+ - [dawn/try/mac-dbg](https://ci.chromium.org/p/dawn/builders/try/mac-dbg)
+ - [dawn/try/mac-rel](https://ci.chromium.org/p/dawn/builders/try/mac-rel)
+ - [dawn/try/win-clang-dbg-x86](https://ci.chromium.org/p/dawn/builders/try/win-clang-dbg-x86)
+ - [dawn/try/win-clang-rel-x64](https://ci.chromium.org/p/dawn/builders/try/win-clang-rel-x64)
+ - [dawn/try/win-msvc-dbg-x86](https://ci.chromium.org/p/dawn/builders/try/win-msvc-dbg-x86)
+ - [dawn/try/win-msvc-rel-x64](https://ci.chromium.org/p/dawn/builders/try/win-msvc-rel-x64)
+
+There are additional `chromium/try` builders, but those are described later in this document.
+
+These bots are defined in both buckets luci.dawn.ci and luci.dawn.try, though their ACL permissions differ. luci.dawn.ci bots will be scheduled regularly based on [[dawn]//infra/config/global/luci-scheduler.cfg](../infra/config/global/luci-scheduler.cfg). luci.dawn.try bots will be triggered on the CQ based on [[dawn]//infra/config/global/commit-queue.cfg](../infra/config/global/commit-queue.cfg).
+
+One particular note is `buckets.swarming.builder_defaults.recipe.name: "dawn"` which specifies these use the [`dawn.py`](https://source.chromium.org/search/?q=file:recipes/dawn.py) build recipe.
+
+Build status for both CI and Try builders can be seen at this [console](https://ci.chromium.org/p/dawn) which is generated from [[dawn]//infra/config/global/luci-milo.cfg](../infra/config/global/luci-milo.cfg).
+
+## Dawn Build Recipe
+The [`dawn.py`](https://cs.chromium.org/search/?q=file:recipes/dawn.py) build recipe is simple and intended only for testing compilation and unit tests. It does the following:
+ 1. Checks out Dawn standalone and dependencies
+ 2. Builds based on the `builder_mixins.recipe.properties` coming from the builder config in [[dawn]//infra/config/global/cr-buildbucket.cfg](../infra/config/global/cr-buildbucket.cfg).
+ 3. Runs the `dawn_unittests` on that same bot.
+
+## Dawn Chromium-Based CI Waterfall Bots
+The [`chromium.dawn`](https://ci.chromium.org/p/chromium/g/chromium.dawn/console) waterfall consists of the bots specified in the `chromium.dawn` section of [[chromium/src]//testing/buildbot/waterfalls.pyl](https://source.chromium.org/search/?q=file:waterfalls.pyl%20chromium.dawn). Bots named "Builder" are responsible for building top-of-tree Dawn, whereas bots named "DEPS Builder" are responsible for building Chromium's DEPS version of Dawn.
+
+The other bots, such as "Dawn Linux x64 DEPS Release (Intel HD 630)" receive the build products from the Builders and are responsible for running tests. The Tester configuration may specify `mixins` from [[chromium/src]//testing/buildbot/mixins.pyl](https://source.chromium.org/search/?q=file:buildbot/mixins.pyl) which help specify bot test dimensions like OS version and GPU vendor. The Tester configuration also specifies `test_suites` from [[chromium/src]//testing/buildbot/test_suites.pyl](https://source.chromium.org/search/?q=file:buildbot/test_suites.pyl%20dawn_end2end_tests) which declare the tests and arguments passed to the tests that should be run on the bot.
+
+The Builder and Tester bots are additionally configured at [[chromium/tools/build]//scripts/slave/recipe_modules/chromium_tests/chromium_dawn.py](https://source.chromium.org/search?q=file:chromium_dawn.py) which defines the bot specs for the builders and testers. Some things to note:
+ - The Tester bots set `parent_buildername` to be their respective Builder bot.
+ - The non DEPS bots use the `dawn_top_of_tree` config.
+ - The bots apply the `mb` config which references [[chromium]//tools/mb/mb_config.pyl](https://source.chromium.org/search?q=file:mb_config.pyl%20%22Dawn%20Linux%20x64%20Builder%22) and [[chromium]//tools/mb/mb_config_buckets.pyl](https://source.chromium.org/search?q=file:mb_config_buckets.pyl%20%22Dawn%20Linux%20x64%20Builder%22). Various mixins there specify build dimensions like debug, release, gn args, x86, x64, etc.
+
+Finally, builds on these waterfall bots are automatically scheduled based on the configuration in [[chromium/src]//infra/config/buckets/ci.star](https://source.chromium.org/search?q=file:ci.star%20%22Dawn%20Linux%20x64%20Builder%22). Note that the Tester bots are `triggered_by` the Builder bots.
+
+## Dawn Chromium-Based Tryjobs
+[[dawn]//infra/config/global/commit-queue.cfg](../infra/config/global/commit-queue.cfg) declares additional tryjob builders which are defined in the Chromium workspace. The reason for this separation is that jobs sent to these bots rely on the Chromium infrastructure for doing builds and triggering jobs on bots with GPU hardware in swarming.
+
+At the time of writing, the bots for Dawn CLs are:
+ - [chromium/try/linux-dawn-rel](https://ci.chromium.org/p/chromium/builders/try/linux-dawn-rel)
+ - [chromium/try/mac-dawn-rel](https://ci.chromium.org/p/chromium/builders/try/mac-dawn-rel)
+ - [chromium/try/win-dawn-rel](https://ci.chromium.org/p/chromium/builders/try/win-dawn-rel)
+
+And for Chromium CLs:
+ - [chromium/try/dawn-linux-x64-deps-rel](https://ci.chromium.org/p/chromium/builders/try/dawn-linux-x64-deps-rel)
+ - [chromium/try/dawn-mac-x64-deps-rel](https://ci.chromium.org/p/chromium/builders/try/dawn-mac-x64-deps-rel)
+ - [chromium/try/dawn-win10-x86-deps-rel](https://ci.chromium.org/p/chromium/builders/try/dawn-win10-x86-deps-rel)
+ - [chromium/try/dawn-win10-x64-deps-rel](https://ci.chromium.org/p/chromium/builders/try/dawn-win10-x64-deps-rel)
+
+ The configuration for these bots is generated from [[chromium]//infra/config/buckets/try.star](https://source.chromium.org/search/?q=file:try.star%20linux-dawn-rel) which uses the [`chromium_dawn_builder`](https://source.chromium.org/search/?q=%22def%20chromium_dawn_builder%22) function which sets the `mastername` to `tryserver.chromium.dawn`.
+
+[[chromium/tools/build]//scripts/slave/recipe_modules/chromium_tests/trybots.py](https://source.chromium.org/search/?q=file:trybots.py%20tryserver.chromium.dawn) specifies `tryserver.chromium.dawn` bots as mirroring bots from the `chromium.dawn` waterfall. Example:
+```
+'dawn-linux-x64-deps-rel': {
+ 'bot_ids': [
+ {
+ 'mastername': 'chromium.dawn',
+ 'buildername': 'Dawn Linux x64 DEPS Builder',
+ 'tester': 'Dawn Linux x64 DEPS Release (Intel HD 630)',
+ },
+ {
+ 'mastername': 'chromium.dawn',
+ 'buildername': 'Dawn Linux x64 DEPS Builder',
+ 'tester': 'Dawn Linux x64 DEPS Release (NVIDIA)',
+ },
+ ],
+},
+```
+
+Using the [[chromium/tools/build]//scripts/slave/recipes/chromium_trybot.py](https://source.chromium.org/search/?q=file:chromium_trybot.py) recipe, these trybots will cherry-pick a CL and run the same tests as the CI waterfall bots. The trybots also pick up some build mixins from [[chromium]//tools/mb/mb_config.pyl](https://source.chromium.org/search?q=file:mb_config.pyl%20dawn-linux-x64-deps-rel).
+
+## Bot Allocation
+
+Bots are physically allocated based on the configuration in [[chromium/infradata/config]//configs/chromium-swarm/starlark/bots/dawn.star](https://chrome-internal.googlesource.com/infradata/config/+/refs/heads/master/configs/chromium-swarm/starlark/bots/dawn.star) (Google only).
+
+`dawn/try` bots are using builderless configurations which means they use builderless GCEs shared with Chromium bots and don't need explicit allocation.
+
+`chromium/try` bots are still explicitly allocated with a number of GCE instances and lifetime of the build cache. All of the GCE bots should eventually be migrated to builderless (crbug.com/dawn/328). Mac bots such as `dawn-mac-x64-deps-rel`, `mac-dawn-rel`, `Dawn Mac x64 Builder`, and `Dawn Mac x64 DEPS Builder` point to specific ranges of machines that have been reserved by the infrastructure team.
diff --git a/chromium/third_party/dawn/docs/testing.md b/chromium/third_party/dawn/docs/testing.md
index c2717a11102..20d715f8939 100644
--- a/chromium/third_party/dawn/docs/testing.md
+++ b/chromium/third_party/dawn/docs/testing.md
@@ -1,3 +1,69 @@
# Testing Dawn
(TODO)
+
+## Dawn Perf Tests
+
+For benchmarking with `dawn_perf_tests`, it's best to build inside a Chromium checkout using the following GN args:
+```
+is_official_build = true # Enables highest optimization level, using LTO on some platforms
+use_dawn = true # Required to build Dawn
+use_cfi_icall=false # Required because Dawn dynamically loads function pointers, and we don't sanitize them yet.
+```
+
+A Chromium checkout is required for the highest optimization flags. It is possible to build and run `dawn_perf_tests` from a standalone Dawn checkout as well, only using GN arg `is_debug=false`. For more information on building, please see [building.md](./building.md).
+
+### Terminology
+
+ - Iteration: The unit of work being measured. It could be a frame, a draw call, a data upload, a computation, etc. `dawn_perf_tests` metrics are reported as time per iteration.
+ - Step: A group of Iterations run together. The number of `iterationsPerStep` is provided to the constructor of `DawnPerfTestBase`.
+ - Trial: A group of Steps run consecutively. `kNumTrials` are run for each test. A Step in a Trial is run repetitively for approximately `kCalibrationRunTimeSeconds`. Metrics are accumulated per-trial and reported as the total time divided by `numSteps * iterationsPerStep`. `maxStepsInFlight` is passed to the `DawnPerfTestBase` constructor to limit the number of Steps pipelined.
+
+(See [`//src/tests/perf_tests/DawnPerfTest.h`](https://cs.chromium.org/chromium/src/third_party/dawn/src/tests/perf_tests/DawnPerfTest.h) for the values of the constants).
+
+### Metrics
+
+`dawn_perf_tests` measures the following metrics:
+ - `wall_time`: The time per iteration, including time waiting for the GPU between Steps in a Trial.
+ - `cpu_time`: The time per iteration, not including time waiting for the GPU between Steps in a Trial.
+ - `validation_time`: The time for CommandBuffer / RenderBundle validation.
+ - `recording_time`: The time to convert Dawn commands to native commands.
+
+Metrics are reported according to the format specified at
+[[chromium]//build/scripts/slave/performance_log_processor.py](https://cs.chromium.org/chromium/build/scripts/slave/performance_log_processor.py)
+
+### Dumping Trace Files
+
+The test harness supports a `--trace-file=path/to/trace.json` argument where Dawn trace events can be dumped. The traces can be viewed in Chrome's `about://tracing` viewer.
+
+### Test Runner
+
+[`//scripts/perf_test_runner.py`](https://cs.chromium.org/chromium/src/third_party/dawn/scripts/perf_test_runner.py) may be run to continuously run a test and report mean times and variances.
+
+Currently the script looks in the `out/Release` build directory and measures the `wall_time` metric (hardcoded into the script). These should eventually become arguments.
+
+Example usage:
+
+```
+scripts/perf_test_runner.py DrawCallPerf.Run/Vulkan__e_skip_validation
+```
+
+### Tests
+
+**BufferUploadPerf**
+
+Tests repetitively uploading data to the GPU using either `SetSubData` or `CreateBufferMapped`.
+
+**DrawCallPerf**
+
+DrawCallPerf tests drawing a simple triangle with many ways of encoding commands,
+binding, and uploading data to the GPU. The rationale for this is the following:
+ - Static/Multiple/Dynamic vertex buffers: Tests switching buffer bindings. This has
+ a state tracking cost as well as a GPU driver cost.
+ - Static/Multiple/Dynamic bind groups: Same rationale as vertex buffers
+ - Static/Dynamic pipelines: In addition to a change to GPU state, changing the pipeline
+ layout incurs additional state tracking costs in Dawn.
+ - With/Without render bundles: All of the above can have lower validation costs if
+ precomputed in a render bundle.
+ - Static/Dynamic data: Updating data for each draw is a common use case. It also tests
+ the efficiency of resource transitions.
diff --git a/chromium/third_party/dawn/examples/Animometer.cpp b/chromium/third_party/dawn/examples/Animometer.cpp
index 1cc5eaa82ac..6657f5199c9 100644
--- a/chromium/third_party/dawn/examples/Animometer.cpp
+++ b/chromium/third_party/dawn/examples/Animometer.cpp
@@ -161,7 +161,7 @@ void frame() {
for (size_t i = 0; i < kNumTriangles; i++) {
uint32_t offset = i * sizeof(ShaderData);
pass.SetBindGroup(0, bindGroup, 1, &offset);
- pass.Draw(3, 1, 0, 0);
+ pass.Draw(3);
}
pass.EndPass();
diff --git a/chromium/third_party/dawn/examples/CHelloTriangle.cpp b/chromium/third_party/dawn/examples/CHelloTriangle.cpp
index d457e38ab57..ffd269c5b19 100644
--- a/chromium/third_party/dawn/examples/CHelloTriangle.cpp
+++ b/chromium/third_party/dawn/examples/CHelloTriangle.cpp
@@ -29,11 +29,9 @@ void init() {
queue = wgpuDeviceCreateQueue(device);
{
- WGPUSwapChainDescriptor descriptor;
- descriptor.nextInChain = nullptr;
- descriptor.label = nullptr;
+ WGPUSwapChainDescriptor descriptor = {};
descriptor.implementation = GetSwapChainImplementation();
- swapchain = wgpuDeviceCreateSwapChain(device, &descriptor);
+ swapchain = wgpuDeviceCreateSwapChain(device, nullptr, &descriptor);
}
swapChainFormat = static_cast<WGPUTextureFormat>(GetPreferredSwapChainTextureFormat());
wgpuSwapChainConfigure(swapchain, swapChainFormat, WGPUTextureUsage_OutputAttachment, 640, 480);
@@ -50,7 +48,7 @@ void init() {
const char* fs =
"#version 450\n"
- "layout(location = 0) out vec4 fragColor;"
+ "layout(location = 0) out vec4 fragColor;\n"
"void main() {\n"
" fragColor = vec4(1.0, 0.0, 0.0, 1.0);\n"
"}\n";
@@ -58,28 +56,23 @@ void init() {
utils::CreateShaderModule(device, utils::SingleShaderStage::Fragment, fs).Release();
{
- WGPURenderPipelineDescriptor descriptor;
- descriptor.label = nullptr;
- descriptor.nextInChain = nullptr;
+ WGPURenderPipelineDescriptor descriptor = {};
- descriptor.vertexStage.nextInChain = nullptr;
descriptor.vertexStage.module = vsModule;
descriptor.vertexStage.entryPoint = "main";
- WGPUProgrammableStageDescriptor fragmentStage;
- fragmentStage.nextInChain = nullptr;
+ WGPUProgrammableStageDescriptor fragmentStage = {};
fragmentStage.module = fsModule;
fragmentStage.entryPoint = "main";
descriptor.fragmentStage = &fragmentStage;
descriptor.sampleCount = 1;
- WGPUBlendDescriptor blendDescriptor;
+ WGPUBlendDescriptor blendDescriptor = {};
blendDescriptor.operation = WGPUBlendOperation_Add;
blendDescriptor.srcFactor = WGPUBlendFactor_One;
blendDescriptor.dstFactor = WGPUBlendFactor_One;
- WGPUColorStateDescriptor colorStateDescriptor;
- colorStateDescriptor.nextInChain = nullptr;
+ WGPUColorStateDescriptor colorStateDescriptor = {};
colorStateDescriptor.format = swapChainFormat;
colorStateDescriptor.alphaBlend = blendDescriptor;
colorStateDescriptor.colorBlend = blendDescriptor;
@@ -88,22 +81,18 @@ void init() {
descriptor.colorStateCount = 1;
descriptor.colorStates = &colorStateDescriptor;
- WGPUPipelineLayoutDescriptor pl;
- pl.nextInChain = nullptr;
- pl.label = nullptr;
+ WGPUPipelineLayoutDescriptor pl = {};
pl.bindGroupLayoutCount = 0;
pl.bindGroupLayouts = nullptr;
descriptor.layout = wgpuDeviceCreatePipelineLayout(device, &pl);
- WGPUVertexStateDescriptor vertexState;
- vertexState.nextInChain = nullptr;
+ WGPUVertexStateDescriptor vertexState = {};
vertexState.indexFormat = WGPUIndexFormat_Uint32;
vertexState.vertexBufferCount = 0;
vertexState.vertexBuffers = nullptr;
descriptor.vertexState = &vertexState;
- WGPURasterizationStateDescriptor rasterizationState;
- rasterizationState.nextInChain = nullptr;
+ WGPURasterizationStateDescriptor rasterizationState = {};
rasterizationState.frontFace = WGPUFrontFace_CCW;
rasterizationState.cullMode = WGPUCullMode_None;
rasterizationState.depthBias = 0;
@@ -126,10 +115,8 @@ void init() {
void frame() {
WGPUTextureView backbufferView = wgpuSwapChainGetCurrentTextureView(swapchain);
- WGPURenderPassDescriptor renderpassInfo;
- renderpassInfo.nextInChain = nullptr;
- renderpassInfo.label = nullptr;
- WGPURenderPassColorAttachmentDescriptor colorAttachment;
+ WGPURenderPassDescriptor renderpassInfo = {};
+ WGPURenderPassColorAttachmentDescriptor colorAttachment = {};
{
colorAttachment.attachment = backbufferView;
colorAttachment.resolveTarget = nullptr;
diff --git a/chromium/third_party/dawn/examples/CMakeLists.txt b/chromium/third_party/dawn/examples/CMakeLists.txt
new file mode 100644
index 00000000000..896b87ed03d
--- /dev/null
+++ b/chromium/third_party/dawn/examples/CMakeLists.txt
@@ -0,0 +1,44 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_library(dawn_sample_utils STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawn_sample_utils PRIVATE
+ "SampleUtils.cpp"
+ "SampleUtils.h"
+)
+target_link_libraries(dawn_sample_utils PUBLIC
+ dawn_internal_config
+ dawncpp
+ dawn_proc
+ dawn_common
+ dawn_native
+ dawn_wire
+ dawn_utils
+ glfw
+)
+
+add_executable(CppHelloTriangle "CppHelloTriangle.cpp")
+target_link_libraries(CppHelloTriangle dawn_sample_utils)
+
+add_executable(CHelloTriangle "CHelloTriangle.cpp")
+target_link_libraries(CHelloTriangle dawn_sample_utils)
+
+add_executable(ComputeBoids "ComputeBoids.cpp")
+target_link_libraries(ComputeBoids dawn_sample_utils glm)
+
+add_executable(Animometer "Animometer.cpp")
+target_link_libraries(Animometer dawn_sample_utils)
+
+add_executable(CubeReflection "CubeReflection.cpp")
+target_link_libraries(CubeReflection dawn_sample_utils glm)
diff --git a/chromium/third_party/dawn/examples/ComputeBoids.cpp b/chromium/third_party/dawn/examples/ComputeBoids.cpp
index 1495387d78e..71d7402e172 100644
--- a/chromium/third_party/dawn/examples/ComputeBoids.cpp
+++ b/chromium/third_party/dawn/examples/ComputeBoids.cpp
@@ -269,7 +269,7 @@ wgpu::CommandBuffer createCommandBuffer(const wgpu::TextureView backbufferView,
wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
pass.SetPipeline(updatePipeline);
pass.SetBindGroup(0, updateBGs[i]);
- pass.Dispatch(kNumParticles, 1, 1);
+ pass.Dispatch(kNumParticles);
pass.EndPass();
}
@@ -279,7 +279,7 @@ wgpu::CommandBuffer createCommandBuffer(const wgpu::TextureView backbufferView,
pass.SetPipeline(renderPipeline);
pass.SetVertexBuffer(0, bufferDst);
pass.SetVertexBuffer(1, modelBuffer);
- pass.Draw(3, kNumParticles, 0, 0);
+ pass.Draw(3, kNumParticles);
pass.EndPass();
}
diff --git a/chromium/third_party/dawn/examples/CppHelloTriangle.cpp b/chromium/third_party/dawn/examples/CppHelloTriangle.cpp
index 05e7a5c57f5..78e0cb8bad3 100644
--- a/chromium/third_party/dawn/examples/CppHelloTriangle.cpp
+++ b/chromium/third_party/dawn/examples/CppHelloTriangle.cpp
@@ -163,7 +163,7 @@ void frame() {
pass.SetBindGroup(0, bindGroup);
pass.SetVertexBuffer(0, vertexBuffer);
pass.SetIndexBuffer(indexBuffer);
- pass.DrawIndexed(3, 1, 0, 0, 0);
+ pass.DrawIndexed(3);
pass.EndPass();
}
diff --git a/chromium/third_party/dawn/examples/CubeReflection.cpp b/chromium/third_party/dawn/examples/CubeReflection.cpp
index 21027837a7b..b07f1a12838 100644
--- a/chromium/third_party/dawn/examples/CubeReflection.cpp
+++ b/chromium/third_party/dawn/examples/CubeReflection.cpp
@@ -274,18 +274,18 @@ void frame() {
pass.SetBindGroup(0, bindGroup[0]);
pass.SetVertexBuffer(0, vertexBuffer);
pass.SetIndexBuffer(indexBuffer);
- pass.DrawIndexed(36, 1, 0, 0, 0);
+ pass.DrawIndexed(36);
pass.SetStencilReference(0x1);
pass.SetPipeline(planePipeline);
pass.SetBindGroup(0, bindGroup[0]);
pass.SetVertexBuffer(0, planeBuffer);
- pass.DrawIndexed(6, 1, 0, 0, 0);
+ pass.DrawIndexed(6);
pass.SetPipeline(reflectionPipeline);
pass.SetVertexBuffer(0, vertexBuffer);
pass.SetBindGroup(0, bindGroup[1]);
- pass.DrawIndexed(36, 1, 0, 0, 0);
+ pass.DrawIndexed(36);
pass.EndPass();
}
diff --git a/chromium/third_party/dawn/examples/ManualSwapChainTest.cpp b/chromium/third_party/dawn/examples/ManualSwapChainTest.cpp
new file mode 100644
index 00000000000..acd0d87ecf0
--- /dev/null
+++ b/chromium/third_party/dawn/examples/ManualSwapChainTest.cpp
@@ -0,0 +1,362 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This is an example to manually test swapchain code. Controls are the following, scoped to the
+// currently focused window:
+// - W: creates a new window.
+// - L: Latches the current swapchain, to check what happens when the window changes but not the
+// swapchain.
+// - R: switches the rendering mode, between "The Red Triangle" and color-cycling clears that's
+// (WARNING) likely seizure inducing.
+// - D: cycles the divisor for the swapchain size.
+// - P: switches present modes.
+//
+// Closing all the windows exits the example. ^C also works.
+//
+// Things to test manually:
+//
+// - Basic tests (with the triangle render mode):
+// - Check the triangle is red on a black background and with the pointy side up.
+// - Cycle render modes a bunch and check that the triangle background is always solid black.
+// - Check that rendering triangles to multiple windows works.
+//
+// - Present mode single-window tests (with cycling color render mode):
+// - Check that Fifo cycles at about 1 cycle per second and has no tearing.
+// - Check that Mailbox cycles faster than Fifo and has no tearing.
+// - Check that Immediate cycles faster than Fifo, it is allowed to have tearing. (dragging
+// between two monitors can help see tearing)
+//
+// - Present mode multi-window tests, it should have the same results as single-window tests when
+// all windows are in the same present mode. In mixed present modes only Immediate windows are
+// allowed to tear.
+//
+// - Resizing tests (with the triangle render mode):
+// - Check that cycling divisors on the triangle produces lower and lower resolution triangles.
+// - Check latching the swapchain config and resizing the window a bunch (smaller, bigger, and
+// diagonal aspect ratio).
+//
+// - Config change tests:
+// - Check that cycling between present modes works.
+// - TODO can't be tested yet: check cycling the same window over multiple devices.
+// - TODO can't be tested yet: check cycling the same window over multiple formats.
+
+#include "common/Assert.h"
+#include "common/Log.h"
+#include "utils/ComboRenderPipelineDescriptor.h"
+#include "utils/GLFWUtils.h"
+#include "utils/WGPUHelpers.h"
+
+#include <dawn/dawn_proc.h>
+#include <dawn/webgpu_cpp.h>
+#include <dawn_native/DawnNative.h>
+#include "GLFW/glfw3.h"
+
+#include <memory>
+#include <unordered_map>
+
+struct WindowData {
+ GLFWwindow* window = nullptr;
+ uint64_t serial = 0;
+
+ float clearCycle = 1.0f;
+ bool latched = false;
+ bool renderTriangle = true;
+ uint32_t divisor = 1;
+
+ wgpu::Surface surface = nullptr;
+ wgpu::SwapChain swapchain = nullptr;
+
+ wgpu::SwapChainDescriptor currentDesc;
+ wgpu::SwapChainDescriptor targetDesc;
+};
+
+static std::unordered_map<GLFWwindow*, std::unique_ptr<WindowData>> windows;
+static uint64_t windowSerial = 0;
+
+static std::unique_ptr<dawn_native::Instance> instance;
+static wgpu::Device device;
+static wgpu::Queue queue;
+static wgpu::RenderPipeline trianglePipeline;
+
+bool IsSameDescriptor(const wgpu::SwapChainDescriptor& a, const wgpu::SwapChainDescriptor& b) {
+ return a.usage == b.usage && a.format == b.format && a.width == b.width &&
+ a.height == b.height && a.presentMode == b.presentMode;
+}
+
+void OnKeyPress(GLFWwindow* window, int key, int, int action, int);
+
+void SyncFromWindow(WindowData* data) {
+ int width;
+ int height;
+ glfwGetFramebufferSize(data->window, &width, &height);
+
+ data->targetDesc.width = std::max(1u, width / data->divisor);
+ data->targetDesc.height = std::max(1u, height / data->divisor);
+}
+
+void AddWindow() {
+ glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
+ GLFWwindow* window = glfwCreateWindow(400, 400, "", nullptr, nullptr);
+ glfwSetKeyCallback(window, OnKeyPress);
+
+ wgpu::SwapChainDescriptor descriptor;
+ descriptor.usage = wgpu::TextureUsage::OutputAttachment;
+ descriptor.format = wgpu::TextureFormat::BGRA8Unorm;
+ descriptor.width = 0;
+ descriptor.height = 0;
+ descriptor.presentMode = wgpu::PresentMode::Fifo;
+
+ std::unique_ptr<WindowData> data = std::make_unique<WindowData>();
+ data->window = window;
+ data->serial = windowSerial++;
+ data->surface = utils::CreateSurfaceForWindow(instance->Get(), window);
+ data->currentDesc = descriptor;
+ data->targetDesc = descriptor;
+ SyncFromWindow(data.get());
+
+ windows[window] = std::move(data);
+}
+
+void DoRender(WindowData* data) {
+ wgpu::TextureView view = data->swapchain.GetCurrentTextureView();
+ wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+ if (data->renderTriangle) {
+ utils::ComboRenderPassDescriptor desc({view});
+ // Use Load to check the swapchain is lazy cleared (we shouldn't see garbage from previous
+ // frames).
+ desc.cColorAttachments[0].loadOp = wgpu::LoadOp::Load;
+
+ wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&desc);
+ pass.SetPipeline(trianglePipeline);
+ pass.Draw(3);
+ pass.EndPass();
+ } else {
+ data->clearCycle -= 1.0 / 60.f;
+ if (data->clearCycle < 0.0) {
+ data->clearCycle = 1.0f;
+ }
+
+ utils::ComboRenderPassDescriptor desc({view});
+ desc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+ desc.cColorAttachments[0].clearColor = {data->clearCycle, 1.0f - data->clearCycle, 0.0f, 1.0f};
+
+ wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&desc);
+ pass.EndPass();
+ }
+
+ wgpu::CommandBuffer commands = encoder.Finish();
+ queue.Submit(1, &commands);
+
+ data->swapchain.Present();
+}
+
+std::ostream& operator<<(std::ostream& o, const wgpu::SwapChainDescriptor& desc) {
+ // For now only output attachment is possible.
+ ASSERT(desc.usage == wgpu::TextureUsage::OutputAttachment);
+ o << "OutputAttachment ";
+ o << desc.width << "x" << desc.height << " ";
+
+ // For now only BGRA is allowed
+ ASSERT(desc.format == wgpu::TextureFormat::BGRA8Unorm);
+ o << "BGRA8Unorm ";
+
+ switch (desc.presentMode) {
+ case wgpu::PresentMode::Immediate:
+ o << "Immediate";
+ break;
+ case wgpu::PresentMode::Fifo:
+ o << "Fifo";
+ break;
+ case wgpu::PresentMode::Mailbox:
+ o << "Mailbox";
+ break;
+ }
+ return o;
+}
+
+void UpdateTitle(WindowData* data) {
+ std::ostringstream o;
+
+ o << data->serial << " ";
+ if (data->divisor != 1) {
+ o << "Divisor:" << data->divisor << " ";
+ }
+
+ if (data->latched) {
+ o << "Latched: (" << data->currentDesc << ") ";
+ o << "Target: (" << data->targetDesc << ")";
+ } else {
+ o << "(" << data->currentDesc << ")";
+ }
+
+ glfwSetWindowTitle(data->window, o.str().c_str());
+}
+
+void OnKeyPress(GLFWwindow* window, int key, int, int action, int) {
+ if (action != GLFW_PRESS) {
+ return;
+ }
+
+ ASSERT(windows.count(window) == 1);
+
+ WindowData* data = windows[window].get();
+ switch (key) {
+ case GLFW_KEY_W:
+ AddWindow();
+ break;
+
+ case GLFW_KEY_L:
+ data->latched = !data->latched;
+ UpdateTitle(data);
+ break;
+
+ case GLFW_KEY_R:
+ data->renderTriangle = !data->renderTriangle;
+ UpdateTitle(data);
+ break;
+
+ case GLFW_KEY_D:
+ data->divisor *= 2;
+ if (data->divisor > 32) {
+ data->divisor = 1;
+ }
+ break;
+
+ case GLFW_KEY_P:
+ switch (data->targetDesc.presentMode) {
+ case wgpu::PresentMode::Immediate:
+ data->targetDesc.presentMode = wgpu::PresentMode::Fifo;
+ break;
+ case wgpu::PresentMode::Fifo:
+ data->targetDesc.presentMode = wgpu::PresentMode::Mailbox;
+ break;
+ case wgpu::PresentMode::Mailbox:
+ data->targetDesc.presentMode = wgpu::PresentMode::Immediate;
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+int main(int argc, const char* argv[]) {
+ // Setup GLFW
+ glfwSetErrorCallback([](int code, const char* message) {
+ dawn::ErrorLog() << "GLFW error " << code << " " << message;
+ });
+ if (!glfwInit()) {
+ return 1;
+ }
+
+ // Choose an adapter we like.
+ // TODO: allow switching the window between devices.
+ DawnProcTable procs = dawn_native::GetProcs();
+ dawnProcSetProcs(&procs);
+
+ instance = std::make_unique<dawn_native::Instance>();
+ instance->DiscoverDefaultAdapters();
+
+ std::vector<dawn_native::Adapter> adapters = instance->GetAdapters();
+ dawn_native::Adapter chosenAdapter;
+ for (dawn_native::Adapter& adapter : adapters) {
+ wgpu::AdapterProperties properties;
+ adapter.GetProperties(&properties);
+ if (properties.backendType != wgpu::BackendType::Null) {
+ chosenAdapter = adapter;
+ break;
+ }
+ }
+ ASSERT(chosenAdapter);
+
+ // Setup the device on that adapter.
+ device = wgpu::Device::Acquire(chosenAdapter.CreateDevice());
+ device.SetUncapturedErrorCallback(
+ [](WGPUErrorType errorType, const char* message, void*) {
+ const char* errorTypeName = "";
+ switch (errorType) {
+ case WGPUErrorType_Validation:
+ errorTypeName = "Validation";
+ break;
+ case WGPUErrorType_OutOfMemory:
+ errorTypeName = "Out of memory";
+ break;
+ case WGPUErrorType_Unknown:
+ errorTypeName = "Unknown";
+ break;
+ case WGPUErrorType_DeviceLost:
+ errorTypeName = "Device lost";
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ dawn::ErrorLog() << errorTypeName << " error: " << message;
+ },
+ nullptr);
+ queue = device.CreateQueue();
+
+ // The hacky pipeline to render a triangle.
+ utils::ComboRenderPipelineDescriptor pipelineDesc(device);
+ pipelineDesc.vertexStage.module =
+ utils::CreateShaderModule(device, utils::SingleShaderStage::Vertex, R"(
+ #version 450
+ const vec2 pos[3] = vec2[3](vec2(0.0f, 0.5f), vec2(-0.5f, -0.5f), vec2(0.5f, -0.5f));
+ void main() {
+ gl_Position = vec4(pos[gl_VertexIndex], 0.0, 1.0);
+ })");
+ pipelineDesc.cFragmentStage.module =
+ utils::CreateShaderModule(device, utils::SingleShaderStage::Fragment, R"(
+ #version 450
+ layout(location = 0) out vec4 fragColor;
+ void main() {
+ fragColor = vec4(1.0, 0.0, 0.0, 1.0);
+ })");
+ pipelineDesc.colorStateCount = 1;
+ // BGRA shouldn't be hardcoded. Consider having a map[format -> pipeline].
+ pipelineDesc.cColorStates[0].format = wgpu::TextureFormat::BGRA8Unorm;
+ trianglePipeline = device.CreateRenderPipeline(&pipelineDesc);
+
+    // Create the first window, since the example exits when there are no windows.
+ AddWindow();
+
+ while (windows.size() != 0) {
+ glfwPollEvents();
+
+ for (auto it = windows.begin(); it != windows.end();) {
+ GLFWwindow* window = it->first;
+
+ if (glfwWindowShouldClose(window)) {
+ glfwDestroyWindow(window);
+ it = windows.erase(it);
+ } else {
+ it++;
+ }
+ }
+
+ for (auto& it : windows) {
+ WindowData* data = it.second.get();
+
+ SyncFromWindow(data);
+ if (!IsSameDescriptor(data->currentDesc, data->targetDesc) && !data->latched) {
+ data->swapchain = device.CreateSwapChain(data->surface, &data->targetDesc);
+ data->currentDesc = data->targetDesc;
+ }
+ UpdateTitle(data);
+ DoRender(data);
+ }
+ }
+}
diff --git a/chromium/third_party/dawn/examples/SampleUtils.cpp b/chromium/third_party/dawn/examples/SampleUtils.cpp
index 46d6c9a10c3..0e38786344d 100644
--- a/chromium/third_party/dawn/examples/SampleUtils.cpp
+++ b/chromium/third_party/dawn/examples/SampleUtils.cpp
@@ -15,8 +15,10 @@
#include "SampleUtils.h"
#include "common/Assert.h"
+#include "common/Log.h"
#include "common/Platform.h"
#include "utils/BackendBinding.h"
+#include "utils/GLFWUtils.h"
#include "utils/TerribleCommandBuffer.h"
#include <dawn/dawn_proc.h>
@@ -28,31 +30,31 @@
#include <algorithm>
#include <cstring>
-#include <iostream>
void PrintDeviceError(WGPUErrorType errorType, const char* message, void*) {
+ const char* errorTypeName = "";
switch (errorType) {
case WGPUErrorType_Validation:
- std::cout << "Validation ";
+ errorTypeName = "Validation";
break;
case WGPUErrorType_OutOfMemory:
- std::cout << "Out of memory ";
+ errorTypeName = "Out of memory";
break;
case WGPUErrorType_Unknown:
- std::cout << "Unknown ";
+ errorTypeName = "Unknown";
break;
case WGPUErrorType_DeviceLost:
- std::cout << "Device lost ";
+ errorTypeName = "Device lost";
break;
default:
UNREACHABLE();
return;
}
- std::cout << "error: " << message << std::endl;
+ dawn::ErrorLog() << errorTypeName << " error: " << message;
}
void PrintGLFWError(int code, const char* message) {
- std::cout << "GLFW error: " << code << " - " << message << std::endl;
+ dawn::ErrorLog() << "GLFW error: " << code << " - " << message;
}
enum class CmdBufType {
@@ -64,13 +66,13 @@ enum class CmdBufType {
// Default to D3D12, Metal, Vulkan, OpenGL in that order as D3D12 and Metal are the preferred on
// their respective platforms, and Vulkan is preferred to OpenGL
#if defined(DAWN_ENABLE_BACKEND_D3D12)
- static dawn_native::BackendType backendType = dawn_native::BackendType::D3D12;
+static wgpu::BackendType backendType = wgpu::BackendType::D3D12;
#elif defined(DAWN_ENABLE_BACKEND_METAL)
- static dawn_native::BackendType backendType = dawn_native::BackendType::Metal;
-#elif defined(DAWN_ENABLE_BACKEND_OPENGL)
- static dawn_native::BackendType backendType = dawn_native::BackendType::OpenGL;
+static wgpu::BackendType backendType = wgpu::BackendType::Metal;
#elif defined(DAWN_ENABLE_BACKEND_VULKAN)
- static dawn_native::BackendType backendType = dawn_native::BackendType::Vulkan;
+static wgpu::BackendType backendType = wgpu::BackendType::Vulkan;
+#elif defined(DAWN_ENABLE_BACKEND_OPENGL)
+static wgpu::BackendType backendType = wgpu::BackendType::OpenGL;
#else
#error
#endif
@@ -108,8 +110,10 @@ wgpu::Device CreateCppDawnDevice() {
std::vector<dawn_native::Adapter> adapters = instance->GetAdapters();
auto adapterIt = std::find_if(adapters.begin(), adapters.end(),
[](const dawn_native::Adapter adapter) -> bool {
- return adapter.GetBackendType() == backendType;
- });
+ wgpu::AdapterProperties properties;
+ adapter.GetProperties(&properties);
+ return properties.backendType == backendType;
+ });
ASSERT(adapterIt != adapters.end());
backendAdapter = *adapterIt;
}
@@ -150,7 +154,7 @@ wgpu::Device CreateCppDawnDevice() {
wireClient = new dawn_wire::WireClient(clientDesc);
WGPUDevice clientDevice = wireClient->GetDevice();
- DawnProcTable clientProcs = wireClient->GetProcs();
+ DawnProcTable clientProcs = dawn_wire::WireClient::GetProcs();
s2cBuf->SetHandler(wireClient);
procs = clientProcs;
@@ -176,7 +180,7 @@ wgpu::TextureFormat GetPreferredSwapChainTextureFormat() {
wgpu::SwapChain GetSwapChain(const wgpu::Device& device) {
wgpu::SwapChainDescriptor swapChainDesc;
swapChainDesc.implementation = GetSwapChainImplementation();
- return device.CreateSwapChain(&swapChainDesc);
+ return device.CreateSwapChain(nullptr, &swapChainDesc);
}
wgpu::TextureView CreateDefaultDepthStencilView(const wgpu::Device& device) {
@@ -199,23 +203,23 @@ bool InitSample(int argc, const char** argv) {
if (std::string("-b") == argv[i] || std::string("--backend") == argv[i]) {
i++;
if (i < argc && std::string("d3d12") == argv[i]) {
- backendType = dawn_native::BackendType::D3D12;
+ backendType = wgpu::BackendType::D3D12;
continue;
}
if (i < argc && std::string("metal") == argv[i]) {
- backendType = dawn_native::BackendType::Metal;
+ backendType = wgpu::BackendType::Metal;
continue;
}
if (i < argc && std::string("null") == argv[i]) {
- backendType = dawn_native::BackendType::Null;
+ backendType = wgpu::BackendType::Null;
continue;
}
if (i < argc && std::string("opengl") == argv[i]) {
- backendType = dawn_native::BackendType::OpenGL;
+ backendType = wgpu::BackendType::OpenGL;
continue;
}
if (i < argc && std::string("vulkan") == argv[i]) {
- backendType = dawn_native::BackendType::Vulkan;
+ backendType = wgpu::BackendType::Vulkan;
continue;
}
fprintf(stderr, "--backend expects a backend name (opengl, metal, d3d12, null, vulkan)\n");
diff --git a/chromium/third_party/dawn/generator/CMakeLists.txt b/chromium/third_party/dawn/generator/CMakeLists.txt
new file mode 100644
index 00000000000..c21359c8dde
--- /dev/null
+++ b/chromium/third_party/dawn/generator/CMakeLists.txt
@@ -0,0 +1,116 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+find_package(PythonInterp REQUIRED)
+message(STATUS "Dawn: using python at ${PYTHON_EXECUTABLE}")
+
+# Check for Jinja2
+if (NOT DAWN_JINJA2_DIR)
+ message(STATUS "Dawn: Using system jinja2")
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE} -c "import jinja2"
+ RESULT_VARIABLE RET
+ )
+ if (NOT RET EQUAL 0)
+ message(FATAL_ERROR "Dawn: Missing dependencies for code generation, please ensure you have python-jinja2 installed.")
+ endif()
+else()
+ message(STATUS "Dawn: Using jinja2 at ${DAWN_JINJA2_DIR}")
+endif()
+
+# Function to invoke a generator_lib.py generator.
+# - SCRIPT is the name of the script to call
+# - ARGS are the extra arguments to pass to the script in addition to the base generator_lib.py arguments
+# - PRINT_NAME is the name to use when outputting status or errors
+# - RESULT_VARIABLE will be modified to contain the list of files generated by this generator
+function(DawnGenerator)
+ set(oneValueArgs SCRIPT RESULT_VARIABLE PRINT_NAME)
+ set(multiValueArgs ARGS)
+ cmake_parse_arguments(G "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+
+ # Build the set of args common to all invocation of that generator.
+ set(BASE_ARGS
+ ${PYTHON_EXECUTABLE}
+ ${G_SCRIPT}
+ --template-dir
+ "${DAWN_TEMPLATE_DIR}"
+ --root-dir
+ "${Dawn_SOURCE_DIR}"
+ --output-dir
+ "${DAWN_BUILD_GEN_DIR}"
+ ${G_ARGS}
+ )
+ if (DAWN_JINJA2_DIR)
+ list(APPEND BASE_ARGS --jinja2-path ${DAWN_JINJA2_DIR})
+ endif()
+
+ # Call the generator to get the list of its dependencies.
+ execute_process(
+ COMMAND ${BASE_ARGS} --print-cmake-dependencies
+ OUTPUT_VARIABLE DEPENDENCIES
+ RESULT_VARIABLE RET
+ )
+ if (NOT RET EQUAL 0)
+ message(FATAL_ERROR "Dawn: Failed to get the dependencies for ${G_PRINT_NAME}.")
+ endif()
+
+ # Ask CMake to re-run if any of the dependencies changed as it might modify the build graph.
+ if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.12.0")
+ set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS ${DEPENDENCIES})
+ endif()
+
+ # Call the generator to get the list of its outputs.
+ execute_process(
+ COMMAND ${BASE_ARGS} --print-cmake-outputs
+ OUTPUT_VARIABLE OUTPUTS
+ RESULT_VARIABLE RET
+ )
+ if (NOT RET EQUAL 0)
+ message(FATAL_ERROR "Dawn: Failed to get the outputs for ${G_PRINT_NAME}.")
+ endif()
+
+ # Add the custom command that calls the generator.
+ add_custom_command(
+ COMMAND ${BASE_ARGS}
+ DEPENDS ${DEPENDENCIES}
+ OUTPUT ${OUTPUTS}
+ COMMENT "Dawn: Generating files for ${G_PRINT_NAME}."
+ )
+
+ # Return the list of outputs.
+ set(${G_RESULT_VARIABLE} ${OUTPUTS} PARENT_SCOPE)
+endfunction()
+
+# Helper function to call dawn_generator.py:
+# - TARGET is the generator target to build
+# - PRINT_NAME and RESULT_VARIABLE are like for DawnGenerator
+function(DawnJSONGenerator)
+ set(oneValueArgs TARGET RESULT_VARIABLE)
+ cmake_parse_arguments(G "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+
+ DawnGenerator(
+ SCRIPT "${Dawn_SOURCE_DIR}/generator/dawn_json_generator.py"
+ ARGS --dawn-json
+ "${Dawn_SOURCE_DIR}/dawn.json"
+ --wire-json
+ "${Dawn_SOURCE_DIR}/dawn_wire.json"
+ --targets
+ ${G_TARGET}
+ RESULT_VARIABLE RET
+ ${G_UNPARSED_ARGUMENTS}
+ )
+
+ # Forward the result up one more scope
+ set(${G_RESULT_VARIABLE} ${RET} PARENT_SCOPE)
+endfunction()
diff --git a/chromium/third_party/dawn/generator/dawn_json_generator.py b/chromium/third_party/dawn/generator/dawn_json_generator.py
index e769858b1bc..6ff4280da8f 100644
--- a/chromium/third_party/dawn/generator/dawn_json_generator.py
+++ b/chromium/third_party/dawn/generator/dawn_json_generator.py
@@ -51,6 +51,14 @@ class Name:
def snake_case(self):
return '_'.join(self.chunks)
+ def js_enum_case(self):
+ result = self.chunks[0].lower()
+ for chunk in self.chunks[1:]:
+ if not result[-1].isdigit():
+ result += '-'
+ result += chunk.lower()
+ return result
+
def concat_names(*names):
return ' '.join([name.canonical_case() for name in names])
@@ -60,12 +68,26 @@ class Type:
self.dict_name = name
self.name = Name(name, native=native)
self.category = json_data['category']
+ self.javascript = self.json_data.get('javascript', True)
-EnumValue = namedtuple('EnumValue', ['name', 'value', 'valid'])
+EnumValue = namedtuple('EnumValue', ['name', 'value', 'valid', 'jsrepr'])
class EnumType(Type):
def __init__(self, name, json_data):
Type.__init__(self, name, json_data)
- self.values = [EnumValue(Name(m['name']), m['value'], m.get('valid', True)) for m in self.json_data['values']]
+
+ self.values = []
+ self.contiguousFromZero = True
+ lastValue = -1
+ for m in self.json_data['values']:
+ value = m['value']
+ if value != lastValue + 1:
+ self.contiguousFromZero = False
+ lastValue = value
+ self.values.append(EnumValue(
+ Name(m['name']),
+ value,
+ m.get('valid', True),
+ m.get('jsrepr', None)))
# Assert that all values are unique in enums
all_values = set()
@@ -140,7 +162,11 @@ class StructureType(Record, Type):
def __init__(self, name, json_data):
Record.__init__(self, name)
Type.__init__(self, name, json_data)
+ self.chained = json_data.get("chained", False)
self.extensible = json_data.get("extensible", False)
+ # Chained structs inherit from wgpu::ChainedStruct which has nextInChain so setting
+ # both extensible and chained would result in two nextInChain members.
+ assert(not (self.extensible and self.chained))
class Command(Record):
def __init__(self, name, members=None):
@@ -373,6 +399,10 @@ def as_cppType(name):
else:
return name.CamelCase()
+def as_jsEnumValue(value):
+ if value.jsrepr: return value.jsrepr
+ return "'" + value.name.js_enum_case() + "'"
+
def convert_cType_to_cppType(typ, annotation, arg, indent=0):
if typ.category == 'native':
return arg
@@ -518,6 +548,7 @@ class MultiGeneratorFromDawnJSON(Generator):
'as_cType': as_cType,
'as_cTypeDawn': as_cTypeDawn,
'as_cppType': as_cppType,
+ 'as_jsEnumValue': as_jsEnumValue,
'convert_cType_to_cppType': convert_cType_to_cppType,
'as_varName': as_varName,
'decorate': decorate,
@@ -529,12 +560,10 @@ class MultiGeneratorFromDawnJSON(Generator):
if 'dawn_headers' in targets:
renders.append(FileRender('webgpu.h', 'src/include/dawn/webgpu.h', [base_params, api_params]))
- renders.append(FileRender('dawn.h', 'src/include/dawn/dawn.h', [base_params, api_params]))
renders.append(FileRender('dawn_proc_table.h', 'src/include/dawn/dawn_proc_table.h', [base_params, api_params]))
if 'dawncpp_headers' in targets:
renders.append(FileRender('webgpu_cpp.h', 'src/include/dawn/webgpu_cpp.h', [base_params, api_params]))
- renders.append(FileRender('dawncpp.h', 'src/include/dawn/dawncpp.h', [base_params, api_params]))
if 'dawn_proc' in targets:
renders.append(FileRender('dawn_proc.c', 'src/dawn/dawn_proc.c', [base_params, api_params]))
@@ -542,6 +571,10 @@ class MultiGeneratorFromDawnJSON(Generator):
if 'dawncpp' in targets:
renders.append(FileRender('webgpu_cpp.cpp', 'src/dawn/webgpu_cpp.cpp', [base_params, api_params]))
+ if 'emscripten_bits' in targets:
+ renders.append(FileRender('webgpu_struct_info.json', 'src/dawn/webgpu_struct_info.json', [base_params, api_params]))
+ renders.append(FileRender('library_webgpu_enum_tables.js', 'src/dawn/library_webgpu_enum_tables.js', [base_params, api_params]))
+
if 'mock_webgpu' in targets:
mock_params = [
base_params,
diff --git a/chromium/third_party/dawn/generator/extract_json.py b/chromium/third_party/dawn/generator/extract_json.py
index 2cc4b3d6f61..40312f9f667 100644
--- a/chromium/third_party/dawn/generator/extract_json.py
+++ b/chromium/third_party/dawn/generator/extract_json.py
@@ -24,7 +24,7 @@ if __name__ == "__main__":
output_dir = sys.argv[2]
- for (name, content) in files.iteritems():
+ for (name, content) in files.items():
output_file = output_dir + os.path.sep + name
directory = os.path.dirname(output_file)
diff --git a/chromium/third_party/dawn/generator/generator_lib.py b/chromium/third_party/dawn/generator/generator_lib.py
index 9dcd1793121..bda4c1e69f6 100644
--- a/chromium/third_party/dawn/generator/generator_lib.py
+++ b/chromium/third_party/dawn/generator/generator_lib.py
@@ -117,6 +117,11 @@ class _PreprocessingLoader(jinja2.BaseLoader):
result = []
indentation_level = 0
+ # Filter lines that are pure comments. line_comment_prefix is not enough because it removes
+ # the comment but doesn't completely remove the line, resulting in more verbose output.
+ lines = filter(lambda line: not line.strip().startswith('//*'), lines)
+
+    # Remove the extra indentation that templates use for Jinja control flow.
for line in lines:
# The capture in the regex adds one element per block start or end so we divide by two
# there is also an extra line chunk corresponding to the line end, so we substract it.
@@ -201,22 +206,39 @@ def _compute_python_dependencies(root_dir = None):
def run_generator(generator):
parser = argparse.ArgumentParser(
description = generator.get_description(),
- formatter_class = argparse.ArgumentDefaultsHelpFormatter
+ formatter_class = argparse.ArgumentDefaultsHelpFormatter,
)
generator.add_commandline_arguments(parser);
- parser.add_argument('-t', '--template-dir', default='templates', type=str, help='Directory with template files.')
+ parser.add_argument('--template-dir', default='templates', type=str, help='Directory with template files.')
parser.add_argument(kJinja2Path, default=None, type=str, help='Additional python path to set before loading Jinja2')
parser.add_argument('--output-json-tarball', default=None, type=str, help='Name of the "JSON tarball" to create (tar is too annoying to use in python).')
parser.add_argument('--depfile', default=None, type=str, help='Name of the Ninja depfile to create for the JSON tarball')
parser.add_argument('--expected-outputs-file', default=None, type=str, help="File to compare outputs with and fail if it doesn't match")
parser.add_argument('--root-dir', default=None, type=str, help='Optional source root directory for Python dependency computations')
parser.add_argument('--allowed-output-dirs-file', default=None, type=str, help="File containing a list of allowed directories where files can be output.")
+ parser.add_argument('--print-cmake-dependencies', default=False, action="store_true", help="Prints a semi-colon separated list of dependencies to stdout and exits.")
+ parser.add_argument('--print-cmake-outputs', default=False, action="store_true", help="Prints a semi-colon separated list of outputs to stdout and exits.")
+    parser.add_argument('--output-dir', default=None, type=str, help='Directory where to output generated files.')
args = parser.parse_args()
renders = generator.get_file_renders(args);
+ # Output a list of all dependencies for CMake or the tarball for GN/Ninja.
+ if args.depfile != None or args.print_cmake_dependencies:
+ dependencies = generator.get_dependencies(args)
+ dependencies += [args.template_dir + os.path.sep + render.template for render in renders]
+ dependencies += _compute_python_dependencies(args.root_dir)
+
+ if args.depfile != None:
+ with open(args.depfile, 'w') as f:
+ f.write(args.output_json_tarball + ": " + " ".join(dependencies))
+
+ if args.print_cmake_dependencies:
+ sys.stdout.write(";".join(dependencies))
+ return 0
+
# The caller wants to assert that the outputs are what it expects.
# Load the file and compare with our renders.
if args.expected_outputs_file != None:
@@ -230,6 +252,11 @@ def run_generator(generator):
print("Actual output:\n " + repr(sorted(actual)))
return 1
+ # Print the list of all the outputs for cmake.
+ if args.print_cmake_outputs:
+ sys.stdout.write(";".join([os.path.join(args.output_dir, render.output) for render in renders]))
+ return 0
+
outputs = _do_renders(renders, args.template_dir)
# The caller wants to assert that the outputs are only in specific directories.
@@ -252,7 +279,7 @@ def run_generator(generator):
print(' "{}"'.format(directory))
return 1
- # Output the tarball and its depfile
+ # Output the JSON tarball
if args.output_json_tarball != None:
json_root = {}
for output in outputs:
@@ -261,11 +288,14 @@ def run_generator(generator):
with open(args.output_json_tarball, 'w') as f:
f.write(json.dumps(json_root))
- # Output a list of all dependencies for the tarball for Ninja.
- if args.depfile != None:
- dependencies = generator.get_dependencies(args)
- dependencies += [args.template_dir + os.path.sep + render.template for render in renders]
- dependencies += _compute_python_dependencies(args.root_dir)
+ # Output the files directly.
+ if args.output_dir != None:
+ for output in outputs:
+ output_path = os.path.join(args.output_dir, output.name)
+
+ directory = os.path.dirname(output_path)
+ if not os.path.exists(directory):
+ os.makedirs(directory)
- with open(args.depfile, 'w') as f:
- f.write(args.output_json_tarball + ": " + " ".join(dependencies))
+ with open(output_path, 'w') as outfile:
+ outfile.write(output.content)
diff --git a/chromium/third_party/dawn/generator/templates/dawn.h b/chromium/third_party/dawn/generator/templates/dawn.h
deleted file mode 100644
index bc788350061..00000000000
--- a/chromium/third_party/dawn/generator/templates/dawn.h
+++ /dev/null
@@ -1,61 +0,0 @@
-//* Copyright 2017 The Dawn Authors
-//*
-//* Licensed under the Apache License, Version 2.0 (the "License");
-//* you may not use this file except in compliance with the License.
-//* You may obtain a copy of the License at
-//*
-//* http://www.apache.org/licenses/LICENSE-2.0
-//*
-//* Unless required by applicable law or agreed to in writing, software
-//* distributed under the License is distributed on an "AS IS" BASIS,
-//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//* See the License for the specific language governing permissions and
-//* limitations under the License.
-
-// This temporary header translates all the previous Dawn C API to the webgpu.h
-// API so that during a small transition period both headers are supported.
-
-#ifndef DAWN_DAWN_H_
-#define DAWN_DAWN_H_
-
-#include "webgpu.h"
-
-#define DAWN_WHOLE_SIZE WGPU_WHOLE_SIZE
-
-{% for type in by_category["object"] %}
- typedef {{as_cType(type.name)}} {{as_cTypeDawn(type.name)}};
- typedef {{as_cType(type.name)}}Impl {{as_cTypeDawn(type.name)}}Impl;
- {% for method in c_methods(type) %}
- typedef {{as_cProc(type.name, method.name)}} {{as_cProcDawn(type.name, method.name)}};
- #define {{as_cMethodDawn(type.name, method.name)}} {{as_cMethod(type.name, method.name)}}
- {% endfor %}
-{% endfor %}
-
-{% for type in by_category["enum"] + by_category["bitmask"] %}
- typedef {{as_cType(type.name)}} {{as_cTypeDawn(type.name)}};
- {% if type.category == "bitmask" %}
- typedef {{as_cType(type.name)}}Flags {{as_cTypeDawn(type.name)}}Flags;
- {% endif %}
-
- {% for value in type.values %}
- #define {{as_cEnumDawn(type.name, value.name)}} {{as_cEnum(type.name, value.name)}}
- {% endfor %}
- #define {{as_cEnumDawn(type.name, Name("force32"))}} {{as_cEnum(type.name, Name("force32"))}}
-{% endfor %}
-
-{% for type in by_category["structure"] %}
- typedef {{as_cType(type.name)}} {{as_cTypeDawn(type.name)}};
-{% endfor %}
-
-typedef WGPUBufferCreateMappedCallback DawnBufferCreateMappedCallback;
-typedef WGPUBufferMapReadCallback DawnBufferMapReadCallback;
-typedef WGPUBufferMapWriteCallback DawnBufferMapWriteCallback;
-typedef WGPUFenceOnCompletionCallback DawnFenceOnCompletionCallback;
-typedef WGPUErrorCallback DawnErrorCallback;
-
-typedef WGPUProc DawnProc;
-
-typedef WGPUProcGetProcAddress DawnProcGetProcAddress;
-#define DawnGetProcAddress WGPUGetProcAddress
-
-#endif // DAWN_DAWN_H_
diff --git a/chromium/third_party/dawn/generator/templates/dawn_native/ProcTable.cpp b/chromium/third_party/dawn/generator/templates/dawn_native/ProcTable.cpp
index 9088ce32794..88b780c9074 100644
--- a/chromium/third_party/dawn/generator/templates/dawn_native/ProcTable.cpp
+++ b/chromium/third_party/dawn/generator/templates/dawn_native/ProcTable.cpp
@@ -33,6 +33,7 @@ namespace dawn_native {
using FenceBase = Fence;
using RenderPassEncoderBase = RenderPassEncoder;
using RenderBundleEncoderBase = RenderBundleEncoder;
+ using SurfaceBase = Surface;
namespace {
@@ -92,6 +93,12 @@ namespace dawn_native {
static constexpr size_t sProcMapSize = sizeof(sProcMap) / sizeof(sProcMap[0]);
}
+ WGPUInstance NativeCreateInstance(WGPUInstanceDescriptor const* cDescriptor) {
+ const dawn_native::InstanceDescriptor* descriptor =
+ reinterpret_cast<const dawn_native::InstanceDescriptor*>(cDescriptor);
+ return reinterpret_cast<WGPUInstance>(InstanceBase::Create(descriptor));
+ }
+
WGPUProc NativeGetProcAddress(WGPUDevice, const char* procName) {
if (procName == nullptr) {
return nullptr;
@@ -107,10 +114,15 @@ namespace dawn_native {
return entry->proc;
}
+ // Special case the two free-standing functions of the API.
if (strcmp(procName, "wgpuGetProcAddress") == 0) {
return reinterpret_cast<WGPUProc>(NativeGetProcAddress);
}
+ if (strcmp(procName, "wgpuCreateInstance") == 0) {
+ return reinterpret_cast<WGPUProc>(NativeCreateInstance);
+ }
+
return nullptr;
}
@@ -126,6 +138,7 @@ namespace dawn_native {
DawnProcTable GetProcsAutogen() {
DawnProcTable table;
table.getProcAddress = NativeGetProcAddress;
+ table.createInstance = NativeCreateInstance;
{% for type in by_category["object"] %}
{% for method in c_methods(type) %}
table.{{as_varName(type.name, method.name)}} = Native{{as_MethodSuffix(type.name, method.name)}};
diff --git a/chromium/third_party/dawn/generator/templates/dawn_native/wgpu_structs.cpp b/chromium/third_party/dawn/generator/templates/dawn_native/wgpu_structs.cpp
index b327f70733a..83f24eea43e 100644
--- a/chromium/third_party/dawn/generator/templates/dawn_native/wgpu_structs.cpp
+++ b/chromium/third_party/dawn/generator/templates/dawn_native/wgpu_structs.cpp
@@ -16,6 +16,15 @@
namespace dawn_native {
+ static_assert(sizeof(ChainedStruct) == sizeof(WGPUChainedStruct),
+ "sizeof mismatch for ChainedStruct");
+ static_assert(alignof(ChainedStruct) == alignof(WGPUChainedStruct),
+ "alignof mismatch for ChainedStruct");
+ static_assert(offsetof(ChainedStruct, nextInChain) == offsetof(WGPUChainedStruct, next),
+ "offsetof mismatch for ChainedStruct::nextInChain");
+ static_assert(offsetof(ChainedStruct, sType) == offsetof(WGPUChainedStruct, sType),
+ "offsetof mismatch for ChainedStruct::sType");
+
{% for type in by_category["structure"] %}
{% set CppType = as_cppType(type.name) %}
{% set CType = as_cType(type.name) %}
diff --git a/chromium/third_party/dawn/generator/templates/dawn_native/wgpu_structs.h b/chromium/third_party/dawn/generator/templates/dawn_native/wgpu_structs.h
index 1241a0085be..561433675e1 100644
--- a/chromium/third_party/dawn/generator/templates/dawn_native/wgpu_structs.h
+++ b/chromium/third_party/dawn/generator/templates/dawn_native/wgpu_structs.h
@@ -32,13 +32,25 @@ namespace dawn_native {
{%- endif -%}
{%- endmacro %}
+ struct ChainedStruct {
+ ChainedStruct const * nextInChain = nullptr;
+ wgpu::SType sType = wgpu::SType::Invalid;
+ };
+
{% for type in by_category["structure"] %}
- struct {{as_cppType(type.name)}} {
+ {% if type.chained %}
+ struct {{as_cppType(type.name)}} : ChainedStruct {
+ {{as_cppType(type.name)}}() {
+ sType = wgpu::SType::{{type.name.CamelCase()}};
+ }
+ {% else %}
+ struct {{as_cppType(type.name)}} {
+ {% endif %}
{% if type.extensible %}
- const void* nextInChain = nullptr;
+ ChainedStruct const * nextInChain = nullptr;
{% endif %}
{% for member in type.members %}
- {{as_annotated_frontendType(member)}} {{render_cpp_default_value(member)}};
+ {{as_annotated_frontendType(member)}} {{render_cpp_default_value(member)}};
{% endfor %}
};
diff --git a/chromium/third_party/dawn/generator/templates/dawn_proc.c b/chromium/third_party/dawn/generator/templates/dawn_proc.c
index 6d6153d273c..9d77755de74 100644
--- a/chromium/third_party/dawn/generator/templates/dawn_proc.c
+++ b/chromium/third_party/dawn/generator/templates/dawn_proc.c
@@ -26,6 +26,10 @@ void dawnProcSetProcs(const DawnProcTable* procs_) {
}
}
+WGPUInstance wgpuCreateInstance(WGPUInstanceDescriptor const * descriptor) {
+ return procs.createInstance(descriptor);
+}
+
WGPUProc wgpuGetProcAddress(WGPUDevice device, const char* procName) {
return procs.getProcAddress(device, procName);
}
diff --git a/chromium/third_party/dawn/generator/templates/dawn_proc_table.h b/chromium/third_party/dawn/generator/templates/dawn_proc_table.h
index 75304b87f3c..197f3001bbb 100644
--- a/chromium/third_party/dawn/generator/templates/dawn_proc_table.h
+++ b/chromium/third_party/dawn/generator/templates/dawn_proc_table.h
@@ -19,6 +19,7 @@
typedef struct DawnProcTable {
WGPUProcGetProcAddress getProcAddress;
+ WGPUProcCreateInstance createInstance;
{% for type in by_category["object"] %}
{% for method in c_methods(type) %}
diff --git a/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiProcs.cpp b/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiProcs.cpp
index 054d2af0f52..1659cdf9c29 100644
--- a/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiProcs.cpp
+++ b/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiProcs.cpp
@@ -17,6 +17,7 @@
#include "dawn_wire/client/Client.h"
#include <algorithm>
+#include <cstring>
#include <string>
#include <vector>
@@ -94,6 +95,11 @@ namespace dawn_wire { namespace client {
{% endfor %}
namespace {
+ WGPUInstance ClientCreateInstance(WGPUInstanceDescriptor const* descriptor) {
+ UNREACHABLE();
+ return nullptr;
+ }
+
struct ProcEntry {
WGPUProc proc;
const char* name;
@@ -121,10 +127,15 @@ namespace dawn_wire { namespace client {
return entry->proc;
}
+ // Special case the two free-standing functions of the API.
if (strcmp(procName, "wgpuGetProcAddress") == 0) {
return reinterpret_cast<WGPUProc>(ClientGetProcAddress);
}
+ if (strcmp(procName, "wgpuCreateInstance") == 0) {
+ return reinterpret_cast<WGPUProc>(ClientCreateInstance);
+ }
+
return nullptr;
}
@@ -145,6 +156,7 @@ namespace dawn_wire { namespace client {
DawnProcTable GetProcs() {
DawnProcTable table;
table.getProcAddress = ClientGetProcAddress;
+ table.createInstance = ClientCreateInstance;
{% for type in by_category["object"] %}
{% for method in c_methods(type) %}
{% set suffix = as_MethodSuffix(type.name, method.name) %}
diff --git a/chromium/third_party/dawn/generator/templates/dawncpp.h b/chromium/third_party/dawn/generator/templates/dawncpp.h
deleted file mode 100644
index a944e581a0f..00000000000
--- a/chromium/third_party/dawn/generator/templates/dawncpp.h
+++ /dev/null
@@ -1,54 +0,0 @@
-//* Copyright 2017 The Dawn Authors
-//*
-//* Licensed under the Apache License, Version 2.0 (the "License");
-//* you may not use this file except in compliance with the License.
-//* You may obtain a copy of the License at
-//*
-//* http://www.apache.org/licenses/LICENSE-2.0
-//*
-//* Unless required by applicable law or agreed to in writing, software
-//* distributed under the License is distributed on an "AS IS" BASIS,
-//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//* See the License for the specific language governing permissions and
-//* limitations under the License.
-
-// This temporary header translates all the previous Dawn C++ API to the webgpu_cpp.h
-// API so that during a small transition period both headers are supported.
-
-#ifndef DAWN_DAWNCPP_H_
-#define DAWN_DAWNCPP_H_
-
-#include "dawn/dawn.h"
-#include "dawn/webgpu_cpp.h"
-
-namespace dawn {
-
- static constexpr uint64_t kWholeSize = wgpu::kWholeSize;
-
- {% for type in by_category["enum"] %}
- using {{as_cppType(type.name)}} = wgpu::{{as_cppType(type.name)}};
- {% endfor %}
-
- {% for type in by_category["bitmask"] %}
- using {{as_cppType(type.name)}} = wgpu::{{as_cppType(type.name)}};
- {% endfor %}
-
- using Proc = wgpu::Proc;
- {% for type in by_category["natively defined"] %}
- using {{as_cppType(type.name)}} = wgpu::{{as_cppType(type.name)}};
- {% endfor %}
-
- {% for type in by_category["object"] %}
- using {{as_cppType(type.name)}} = wgpu::{{as_cppType(type.name)}};
- {% endfor %}
-
- {% for type in by_category["structure"] %}
- using {{as_cppType(type.name)}} = wgpu::{{as_cppType(type.name)}};
- {% endfor %}
-
- static inline Proc GetProcAddress(Device const& device, const char* procName) {
- return wgpu::GetProcAddress(device, procName);
- }
-} // namespace dawn
-
-#endif // DAWN_DAWNCPP_H_
diff --git a/chromium/third_party/dawn/generator/templates/library_webgpu_enum_tables.js b/chromium/third_party/dawn/generator/templates/library_webgpu_enum_tables.js
new file mode 100644
index 00000000000..44048ad3a9e
--- /dev/null
+++ b/chromium/third_party/dawn/generator/templates/library_webgpu_enum_tables.js
@@ -0,0 +1,35 @@
+//* Copyright 2020 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//* http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+//*
+//*
+//* This generator is used to produce the number-to-string mappings for
+//* Emscripten's library_webgpu.js.
+//* https://github.com/emscripten-core/emscripten/blob/master/src/library_webgpu.js
+//*
+ {% for type in by_category["enum"] if type.javascript %}
+ {{type.name.CamelCase()}}: {% if type.contiguousFromZero -%}
+ [
+ {% for value in type.values %}
+ {{as_jsEnumValue(value)}},
+ {% endfor %}
+ ]
+ {%- else -%}
+ {
+ {% for value in type.values %}
+ {{value.value}}: {{as_jsEnumValue(value)}},
+ {% endfor %}
+ }
+ {%- endif -%}
+ ,
+ {% endfor %}
diff --git a/chromium/third_party/dawn/generator/templates/mock_webgpu.cpp b/chromium/third_party/dawn/generator/templates/mock_webgpu.cpp
index 788290b747a..edb96898b14 100644
--- a/chromium/third_party/dawn/generator/templates/mock_webgpu.cpp
+++ b/chromium/third_party/dawn/generator/templates/mock_webgpu.cpp
@@ -55,11 +55,21 @@ void ProcTableAsClass::DeviceSetUncapturedErrorCallback(WGPUDevice self,
void* userdata) {
auto object = reinterpret_cast<ProcTableAsClass::Object*>(self);
object->deviceErrorCallback = callback;
- object->userdata1 = userdata;
+ object->userdata = userdata;
OnDeviceSetUncapturedErrorCallback(self, callback, userdata);
}
+void ProcTableAsClass::DeviceSetDeviceLostCallback(WGPUDevice self,
+ WGPUDeviceLostCallback callback,
+ void* userdata) {
+ auto object = reinterpret_cast<ProcTableAsClass::Object*>(self);
+ object->deviceLostCallback = callback;
+ object->userdata = userdata;
+
+ OnDeviceSetDeviceLostCallback(self, callback, userdata);
+}
+
bool ProcTableAsClass::DevicePopErrorScope(WGPUDevice self,
WGPUErrorCallback callback,
void* userdata) {
@@ -72,7 +82,7 @@ void ProcTableAsClass::DeviceCreateBufferMappedAsync(WGPUDevice self,
void* userdata) {
auto object = reinterpret_cast<ProcTableAsClass::Object*>(self);
object->createBufferMappedCallback = callback;
- object->userdata1 = userdata;
+ object->userdata = userdata;
OnDeviceCreateBufferMappedAsyncCallback(self, descriptor, callback, userdata);
}
@@ -82,7 +92,7 @@ void ProcTableAsClass::BufferMapReadAsync(WGPUBuffer self,
void* userdata) {
auto object = reinterpret_cast<ProcTableAsClass::Object*>(self);
object->mapReadCallback = callback;
- object->userdata1 = userdata;
+ object->userdata = userdata;
OnBufferMapReadAsyncCallback(self, callback, userdata);
}
@@ -92,7 +102,7 @@ void ProcTableAsClass::BufferMapWriteAsync(WGPUBuffer self,
void* userdata) {
auto object = reinterpret_cast<ProcTableAsClass::Object*>(self);
object->mapWriteCallback = callback;
- object->userdata1 = userdata;
+ object->userdata = userdata;
OnBufferMapWriteAsyncCallback(self, callback, userdata);
}
@@ -103,7 +113,7 @@ void ProcTableAsClass::FenceOnCompletion(WGPUFence self,
void* userdata) {
auto object = reinterpret_cast<ProcTableAsClass::Object*>(self);
object->fenceOnCompletionCallback = callback;
- object->userdata1 = userdata;
+ object->userdata = userdata;
OnFenceOnCompletionCallback(self, value, callback, userdata);
}
@@ -112,20 +122,26 @@ void ProcTableAsClass::CallDeviceErrorCallback(WGPUDevice device,
WGPUErrorType type,
const char* message) {
auto object = reinterpret_cast<ProcTableAsClass::Object*>(device);
- object->deviceErrorCallback(type, message, object->userdata1);
+ object->deviceErrorCallback(type, message, object->userdata);
}
+
+void ProcTableAsClass::CallDeviceLostCallback(WGPUDevice device, const char* message) {
+ auto object = reinterpret_cast<ProcTableAsClass::Object*>(device);
+ object->deviceLostCallback(message, object->userdata);
+}
+
void ProcTableAsClass::CallCreateBufferMappedCallback(WGPUDevice device,
WGPUBufferMapAsyncStatus status,
WGPUCreateBufferMappedResult result) {
auto object = reinterpret_cast<ProcTableAsClass::Object*>(device);
- object->createBufferMappedCallback(status, result, object->userdata1);
+ object->createBufferMappedCallback(status, result, object->userdata);
}
void ProcTableAsClass::CallMapReadCallback(WGPUBuffer buffer,
WGPUBufferMapAsyncStatus status,
const void* data,
uint64_t dataLength) {
auto object = reinterpret_cast<ProcTableAsClass::Object*>(buffer);
- object->mapReadCallback(status, data, dataLength, object->userdata1);
+ object->mapReadCallback(status, data, dataLength, object->userdata);
}
void ProcTableAsClass::CallMapWriteCallback(WGPUBuffer buffer,
@@ -133,13 +149,13 @@ void ProcTableAsClass::CallMapWriteCallback(WGPUBuffer buffer,
void* data,
uint64_t dataLength) {
auto object = reinterpret_cast<ProcTableAsClass::Object*>(buffer);
- object->mapWriteCallback(status, data, dataLength, object->userdata1);
+ object->mapWriteCallback(status, data, dataLength, object->userdata);
}
void ProcTableAsClass::CallFenceOnCompletionCallback(WGPUFence fence,
WGPUFenceCompletionStatus status) {
auto object = reinterpret_cast<ProcTableAsClass::Object*>(fence);
- object->fenceOnCompletionCallback(status, object->userdata1);
+ object->fenceOnCompletionCallback(status, object->userdata);
}
{% for type in by_category["object"] %}
diff --git a/chromium/third_party/dawn/generator/templates/mock_webgpu.h b/chromium/third_party/dawn/generator/templates/mock_webgpu.h
index 57d043d18e7..f21e1dc1d69 100644
--- a/chromium/third_party/dawn/generator/templates/mock_webgpu.h
+++ b/chromium/third_party/dawn/generator/templates/mock_webgpu.h
@@ -55,6 +55,9 @@ class ProcTableAsClass {
void DeviceSetUncapturedErrorCallback(WGPUDevice self,
WGPUErrorCallback callback,
void* userdata);
+ void DeviceSetDeviceLostCallback(WGPUDevice self,
+ WGPUDeviceLostCallback callback,
+ void* userdata);
bool DevicePopErrorScope(WGPUDevice self, WGPUErrorCallback callback, void* userdata);
void DeviceCreateBufferMappedAsync(WGPUDevice self,
const WGPUBufferDescriptor* descriptor,
@@ -75,6 +78,9 @@ class ProcTableAsClass {
virtual void OnDeviceSetUncapturedErrorCallback(WGPUDevice device,
WGPUErrorCallback callback,
void* userdata) = 0;
+ virtual void OnDeviceSetDeviceLostCallback(WGPUDevice device,
+ WGPUDeviceLostCallback callback,
+ void* userdata) = 0;
virtual bool OnDevicePopErrorScopeCallback(WGPUDevice device,
WGPUErrorCallback callback,
void* userdata) = 0;
@@ -95,6 +101,7 @@ class ProcTableAsClass {
// Calls the stored callbacks
void CallDeviceErrorCallback(WGPUDevice device, WGPUErrorType type, const char* message);
+ void CallDeviceLostCallback(WGPUDevice device, const char* message);
void CallCreateBufferMappedCallback(WGPUDevice device, WGPUBufferMapAsyncStatus status, WGPUCreateBufferMappedResult result);
void CallMapReadCallback(WGPUBuffer buffer, WGPUBufferMapAsyncStatus status, const void* data, uint64_t dataLength);
void CallMapWriteCallback(WGPUBuffer buffer, WGPUBufferMapAsyncStatus status, void* data, uint64_t dataLength);
@@ -103,12 +110,12 @@ class ProcTableAsClass {
struct Object {
ProcTableAsClass* procs = nullptr;
WGPUErrorCallback deviceErrorCallback = nullptr;
+ WGPUDeviceLostCallback deviceLostCallback = nullptr;
WGPUBufferCreateMappedCallback createBufferMappedCallback = nullptr;
WGPUBufferMapReadCallback mapReadCallback = nullptr;
WGPUBufferMapWriteCallback mapWriteCallback = nullptr;
WGPUFenceOnCompletionCallback fenceOnCompletionCallback = nullptr;
- void* userdata1 = 0;
- void* userdata2 = 0;
+ void* userdata = 0;
};
private:
@@ -139,6 +146,8 @@ class MockProcTable : public ProcTableAsClass {
{% endfor %}
MOCK_METHOD3(OnDeviceSetUncapturedErrorCallback, void(WGPUDevice device, WGPUErrorCallback callback, void* userdata));
+ MOCK_METHOD3(OnDeviceSetDeviceLostCallback,
+ void(WGPUDevice device, WGPUDeviceLostCallback callback, void* userdata));
MOCK_METHOD3(OnDevicePopErrorScopeCallback, bool(WGPUDevice device, WGPUErrorCallback callback, void* userdata));
MOCK_METHOD4(OnDeviceCreateBufferMappedAsyncCallback, void(WGPUDevice device, const WGPUBufferDescriptor* descriptor, WGPUBufferCreateMappedCallback callback, void* userdata));
MOCK_METHOD3(OnBufferMapReadAsyncCallback, void(WGPUBuffer buffer, WGPUBufferMapReadCallback callback, void* userdata));
diff --git a/chromium/third_party/dawn/generator/templates/opengl/OpenGLFunctionsBase.cpp b/chromium/third_party/dawn/generator/templates/opengl/OpenGLFunctionsBase.cpp
index bf5246f8f0b..79fc5b15298 100644
--- a/chromium/third_party/dawn/generator/templates/opengl/OpenGLFunctionsBase.cpp
+++ b/chromium/third_party/dawn/generator/templates/opengl/OpenGLFunctionsBase.cpp
@@ -20,7 +20,7 @@ template<typename T>
MaybeError OpenGLFunctionsBase::LoadProc(GetProcAddress getProc, T* memberProc, const char* name) {
*memberProc = reinterpret_cast<T>(getProc(name));
if (DAWN_UNLIKELY(memberProc == nullptr)) {
- return DAWN_DEVICE_LOST_ERROR(std::string("Couldn't load GL proc: ") + name);
+ return DAWN_INTERNAL_ERROR(std::string("Couldn't load GL proc: ") + name);
}
return {};
}
diff --git a/chromium/third_party/dawn/generator/templates/webgpu.h b/chromium/third_party/dawn/generator/templates/webgpu.h
index dbbe36a5d69..bae0a53e604 100644
--- a/chromium/third_party/dawn/generator/templates/webgpu.h
+++ b/chromium/third_party/dawn/generator/templates/webgpu.h
@@ -1,3 +1,24 @@
+//* Copyright 2020 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//* http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+//*
+//*
+//* This template itself is part of the Dawn source and follows Dawn's license
+//* but the generated file is used for "WebGPU native". The template comments
+//* using //* at the top of the file are removed during generation such that
+//* the resulting file starts with the BSD 3-Clause comment.
+//*
+//*
// BSD 3-Clause License
//
// Copyright (c) 2019, "WebGPU native" developers
@@ -52,7 +73,7 @@
#include <stddef.h>
#include <stdbool.h>
-const uint64_t WGPU_WHOLE_SIZE = 0xffffffffffffffffULL; // UINT64_MAX
+#define WGPU_WHOLE_SIZE (0xffffffffffffffffULL)
typedef uint32_t WGPUFlags;
@@ -73,10 +94,18 @@ typedef uint32_t WGPUFlags;
{% endfor %}
+typedef struct WGPUChainedStruct {
+ struct WGPUChainedStruct const * next;
+ WGPUSType sType;
+} WGPUChainedStruct;
+
{% for type in by_category["structure"] %}
typedef struct {{as_cType(type.name)}} {
{% if type.extensible %}
- void const * nextInChain;
+ WGPUChainedStruct const * nextInChain;
+ {% endif %}
+ {% if type.chained %}
+ WGPUChainedStruct chain;
{% endif %}
{% for member in type.members %}
{{as_annotated_cType(member)}};
@@ -101,6 +130,7 @@ typedef void (*WGPUProc)();
#if !defined(WGPU_SKIP_PROCS)
+typedef WGPUInstance (*WGPUProcCreateInstance)(WGPUInstanceDescriptor const * descriptor);
typedef WGPUProc (*WGPUProcGetProcAddress)(WGPUDevice device, char const * procName);
{% for type in by_category["object"] if len(c_methods(type)) > 0 %}
@@ -119,6 +149,7 @@ typedef WGPUProc (*WGPUProcGetProcAddress)(WGPUDevice device, char const * procN
#if !defined(WGPU_SKIP_DECLARATIONS)
+WGPU_EXPORT WGPUInstance wgpuCreateInstance(WGPUInstanceDescriptor const * descriptor);
WGPU_EXPORT WGPUProc wgpuGetProcAddress(WGPUDevice device, char const * procName);
{% for type in by_category["object"] if len(c_methods(type)) > 0 %}
diff --git a/chromium/third_party/dawn/generator/templates/webgpu_cpp.cpp b/chromium/third_party/dawn/generator/templates/webgpu_cpp.cpp
index 1f4bbc7912c..7d22f640af4 100644
--- a/chromium/third_party/dawn/generator/templates/webgpu_cpp.cpp
+++ b/chromium/third_party/dawn/generator/templates/webgpu_cpp.cpp
@@ -42,6 +42,15 @@ namespace wgpu {
{% endfor %}
+ static_assert(sizeof(ChainedStruct) == sizeof(WGPUChainedStruct),
+ "sizeof mismatch for ChainedStruct");
+ static_assert(alignof(ChainedStruct) == alignof(WGPUChainedStruct),
+ "alignof mismatch for ChainedStruct");
+ static_assert(offsetof(ChainedStruct, nextInChain) == offsetof(WGPUChainedStruct, next),
+ "offsetof mismatch for ChainedStruct::nextInChain");
+ static_assert(offsetof(ChainedStruct, sType) == offsetof(WGPUChainedStruct, sType),
+ "offsetof mismatch for ChainedStruct::sType");
+
{% for type in by_category["structure"] %}
{% set CppType = as_cppType(type.name) %}
{% set CType = as_cType(type.name) %}
@@ -125,6 +134,12 @@ namespace wgpu {
{% endfor %}
+ Instance CreateInstance(const InstanceDescriptor* descriptor) {
+ const WGPUInstanceDescriptor* cDescriptor =
+ reinterpret_cast<const WGPUInstanceDescriptor*>(descriptor);
+ return Instance::Acquire(wgpuCreateInstance(cDescriptor));
+ }
+
Proc GetProcAddress(Device const& device, const char* procName) {
return reinterpret_cast<Proc>(wgpuGetProcAddress(device.Get(), procName));
}
diff --git a/chromium/third_party/dawn/generator/templates/webgpu_cpp.h b/chromium/third_party/dawn/generator/templates/webgpu_cpp.h
index b110f5645dc..6bfcdb7054c 100644
--- a/chromium/third_party/dawn/generator/templates/webgpu_cpp.h
+++ b/chromium/third_party/dawn/generator/templates/webgpu_cpp.h
@@ -183,12 +183,25 @@ namespace wgpu {
{% endfor %}
+ Instance CreateInstance(InstanceDescriptor const * descriptor = nullptr);
Proc GetProcAddress(Device const& device, const char* procName);
+ struct ChainedStruct {
+ ChainedStruct const * nextInChain = nullptr;
+ SType sType = SType::Invalid;
+ };
+
{% for type in by_category["structure"] %}
- struct {{as_cppType(type.name)}} {
+ {% if type.chained %}
+ struct {{as_cppType(type.name)}} : ChainedStruct {
+ {{as_cppType(type.name)}}() {
+ sType = SType::{{type.name.CamelCase()}};
+ }
+ {% else %}
+ struct {{as_cppType(type.name)}} {
+ {% endif %}
{% if type.extensible %}
- const void* nextInChain = nullptr;
+ ChainedStruct const * nextInChain = nullptr;
{% endif %}
{% for member in type.members %}
{{as_annotated_cppType(member)}}{{render_cpp_default_value(member)}};
diff --git a/chromium/third_party/dawn/generator/templates/webgpu_struct_info.json b/chromium/third_party/dawn/generator/templates/webgpu_struct_info.json
new file mode 100644
index 00000000000..5120ba8d5aa
--- /dev/null
+++ b/chromium/third_party/dawn/generator/templates/webgpu_struct_info.json
@@ -0,0 +1,51 @@
+//* Copyright 2020 The Dawn Authors
+//*
+//* Licensed under the Apache License, Version 2.0 (the "License");
+//* you may not use this file except in compliance with the License.
+//* You may obtain a copy of the License at
+//*
+//* http://www.apache.org/licenses/LICENSE-2.0
+//*
+//* Unless required by applicable law or agreed to in writing, software
+//* distributed under the License is distributed on an "AS IS" BASIS,
+//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//* See the License for the specific language governing permissions and
+//* limitations under the License.
+//*
+//*
+//* This generator is used to produce part of Emscripten's struct_info.json,
+//* which is a list of struct fields that it uses to generate field offset
+//* information for its own code generators.
+//* https://github.com/emscripten-core/emscripten/blob/master/src/struct_info.json
+//*
+ {
+ "file": "webgpu/webgpu.h",
+ "defines": [],
+ "structs": {
+ "WGPUChainedStruct": [
+ "nextInChain",
+ "sType"
+ ],
+ {% for type in by_category["structure"] if type.javascript %}
+ "{{as_cType(type.name)}}": [
+ {% if type.chained %}
+ "nextInChain",
+ "sType"
+ {%- elif type.extensible %}
+ "nextInChain"
+ {%- endif %}
+ {% for member in type.members -%}
+ {%- if (type.chained or type.extensible) or not loop.first -%}
+ ,
+ {% endif %}
+ "{{as_varName(member.name)}}"
+ {%- endfor %}
+
+ ]
+ {%- if not loop.last -%}
+ ,
+ {% endif %}
+ {% endfor %}
+
+ }
+ }
diff --git a/chromium/third_party/dawn/infra/config/global/cr-buildbucket.cfg b/chromium/third_party/dawn/infra/config/global/cr-buildbucket.cfg
index c5d47f78214..1d4532572d8 100644
--- a/chromium/third_party/dawn/infra/config/global/cr-buildbucket.cfg
+++ b/chromium/third_party/dawn/infra/config/global/cr-buildbucket.cfg
@@ -72,7 +72,7 @@ builder_mixins {
}
builder_mixins {
name: "mac"
- dimensions: "os:Mac-10.13"
+ dimensions: "os:Mac-10.15"
caches: { # cache for depot_tools.osx_sdk recipe module
name: "osx_sdk"
path: "osx_sdk"
@@ -80,7 +80,7 @@ builder_mixins {
recipe {
properties_j: <<EOF
$depot_tools/osx_sdk:{
- "sdk_version": "10b61"
+ "sdk_version": "11b52"
}
EOF
}
diff --git a/chromium/third_party/dawn/scripts/dawn_features.gni b/chromium/third_party/dawn/scripts/dawn_features.gni
index ab1d595f6bc..37a32a05c0d 100644
--- a/chromium/third_party/dawn/scripts/dawn_features.gni
+++ b/chromium/third_party/dawn/scripts/dawn_features.gni
@@ -12,6 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import("//build_overrides/build.gni")
+
+if (build_with_chromium) {
+ import("//build/config/sanitizers/sanitizers.gni")
+}
+
+declare_args() {
+ # Enables usage of swiftshader on the Vulkan backend.
+ # Note that this will only work in standalone and in projects that set the
+ # dawn_swiftshader_dir variable in build_overrides/dawn.gni
+ # Because of how the Vulkan loader works, setting this makes Dawn only able
+ # to find the Swiftshader ICD and not the others.
+ # Enabled by default when fuzzing.
+ dawn_use_swiftshader = build_with_chromium && use_fuzzing_engine
+}
+
declare_args() {
# Enable Dawn's ASSERTs even in release builds
dawn_always_assert = false
@@ -36,7 +52,8 @@ declare_args() {
dawn_enable_opengl = is_linux && !is_chromeos
# Enables the compilation of Dawn's Vulkan backend
- dawn_enable_vulkan = is_linux || is_win || is_fuchsia || is_android
+ dawn_enable_vulkan =
+ is_linux || is_win || is_fuchsia || is_android || dawn_use_swiftshader
# Enable use of reflection compiler in spirv-cross. This is needed
# if performing reflection on systems that the platform language
@@ -45,12 +62,12 @@ declare_args() {
# compiler, since it is a sub-class of it.
dawn_enable_cross_reflection = false
- # Enables usage of swiftshader on the Vulkan backend.
- # Note that this will only work in standalone and in projects that set the
- # dawn_swiftshader_dir variable in build_overrides/dawn.gni
- # Because of how the Vulkan loader works, setting this make Dawn only able
- # to find the Swiftshader ICD and not the others.
- dawn_use_swiftshader = false
+ # Enables error injection for faking failures to native API calls
+ dawn_enable_error_injection =
+ is_debug || (build_with_chromium && use_fuzzing_engine)
+
+ # Whether Dawn should enable X11 support.
+ dawn_use_x11 = is_linux && !is_chromeos
}
# GN does not allow reading a variable defined in the same declare_args().
diff --git a/chromium/third_party/dawn/scripts/dawn_overrides_with_defaults.gni b/chromium/third_party/dawn/scripts/dawn_overrides_with_defaults.gni
index b7eaee10a28..0886e032ca6 100644
--- a/chromium/third_party/dawn/scripts/dawn_overrides_with_defaults.gni
+++ b/chromium/third_party/dawn/scripts/dawn_overrides_with_defaults.gni
@@ -41,10 +41,6 @@ if (!defined(dawn_googletest_dir)) {
dawn_googletest_dir = "//third_party/googletest"
}
-if (!defined(dawn_jsoncpp_dir)) {
- dawn_jsoncpp_dir = "//third_party/jsoncpp"
-}
-
if (!defined(dawn_shaderc_dir)) {
dawn_shaderc_dir = "//third_party/shaderc"
}
diff --git a/chromium/third_party/dawn/scripts/perf_test_runner.py b/chromium/third_party/dawn/scripts/perf_test_runner.py
index 743824f07d8..546bebdfc3e 100755
--- a/chromium/third_party/dawn/scripts/perf_test_runner.py
+++ b/chromium/third_party/dawn/scripts/perf_test_runner.py
@@ -115,9 +115,9 @@ def get_results(metric, extra_args=[]):
print(output)
sys.exit(3)
- pattern = metric + r'= ([0-9.]+)'
+ pattern = metric + r'.*= ([0-9.]+)'
m = re.findall(pattern, output)
- if m is None:
+ if not m:
print("Did not find the metric '%s' in the test output:" % metric)
print(output)
sys.exit(1)
diff --git a/chromium/third_party/dawn/scripts/update_fuzzer_seed_corpus.sh b/chromium/third_party/dawn/scripts/update_fuzzer_seed_corpus.sh
new file mode 100755
index 00000000000..966c1d6c89b
--- /dev/null
+++ b/chromium/third_party/dawn/scripts/update_fuzzer_seed_corpus.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+# Copyright 2019 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generates a seed corpus for fuzzing based on dumping wire traces
+# from running Dawn tests
+
+# Exit if anything fails
+set -e
+
+if [ "$#" -lt 3 ]; then
+cat << EOF
+
+Usage:
+ $0 <out_dir> <fuzzer_name> <test_name> [additional_test_args...]
+
+Example:
+ $0 out/fuzz dawn_wire_server_and_vulkan_backend_fuzzer dawn_end2end_tests --gtest_filter=*Vulkan
+
+EOF
+ exit 1
+fi
+
+all_args=("$@")
+out_dir=$1
+fuzzer_name=$2
+test_name=$3
+additional_test_args=("${all_args[@]:3}")
+
+testcase_dir="/tmp/testcases/${fuzzer_name}/"
+injected_error_testcase_dir="/tmp/testcases/${fuzzer_name}_injected/"
+minimized_testcase_dir="/tmp/testcases/${fuzzer_name}_minimized/"
+
+# Print commands so it's clear what is being executed
+set -x
+
+# Make a directory for temporarily storing testcases
+mkdir -p "$testcase_dir"
+
+# Make an empty directory for temporarily storing testcases with injected errors
+rm -rf "$injected_error_testcase_dir"
+mkdir -p "$injected_error_testcase_dir"
+
+# Make an empty directory for temporarily storing minimized testcases
+rm -rf "$minimized_testcase_dir"
+mkdir -p "$minimized_testcase_dir"
+
+# Build the fuzzer and test
+autoninja -C $out_dir $fuzzer_name $test_name
+
+fuzzer_binary="${out_dir}/${fuzzer_name}"
+test_binary="${out_dir}/${test_name}"
+
+# Run the test binary
+$test_binary --use-wire --wire-trace-dir="$testcase_dir" $additional_test_args
+
+# Run the fuzzer over the testcases to inject errors
+$fuzzer_binary --injected-error-testcase-dir="$injected_error_testcase_dir" -runs=0 "$testcase_dir"
+
+# Run the fuzzer to minimize the testcases + injected errors
+$fuzzer_binary -merge=1 "$minimized_testcase_dir" "$injected_error_testcase_dir" "$testcase_dir"
+
+# Turn off command printing
+set +x
+
+if [ -z "$(ls -A $minimized_testcase_dir)" ]; then
+cat << EOF
+
+Minimized testcase directory is empty!
+Are you building with use_libfuzzer=true ?
+
+EOF
+ exit 1
+fi
+
+cat << EOF
+
+Please test the corpus in $minimized_testcase_dir with $fuzzer_name and confirm it works as expected.
+
+ $fuzzer_binary $minimized_testcase_dir
+
+Then, run the following command to upload new testcases to the seed corpus:
+
+ gsutil -m rsync $minimized_testcase_dir gs://clusterfuzz-corpus/libfuzzer/${fuzzer_name}/
+
+EOF
diff --git a/chromium/third_party/dawn/src/Dummy.cpp b/chromium/third_party/dawn/src/Dummy.cpp
new file mode 100644
index 00000000000..5959a87bb60
--- /dev/null
+++ b/chromium/third_party/dawn/src/Dummy.cpp
@@ -0,0 +1,18 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// CMake requires that targets contain at least one file. This file is used when we want to create
+// empty targets.
+
+int someSymbolToMakeXCodeHappy = 0;
diff --git a/chromium/third_party/dawn/src/common/Assert.cpp b/chromium/third_party/dawn/src/common/Assert.cpp
index fc88fd2a8e7..8802c202f38 100644
--- a/chromium/third_party/dawn/src/common/Assert.cpp
+++ b/chromium/third_party/dawn/src/common/Assert.cpp
@@ -13,14 +13,19 @@
// limitations under the License.
#include "common/Assert.h"
+#include "common/Log.h"
-#include <iostream>
+#include <cstdlib>
void HandleAssertionFailure(const char* file,
const char* function,
int line,
const char* condition) {
- std::cerr << "Assertion failure at " << file << ":" << line << " (" << function
- << "): " << condition << std::endl;
+ dawn::ErrorLog() << "Assertion failure at " << file << ":" << line << " (" << function
+ << "): " << condition;
+#if defined(DAWN_ABORT_ON_ASSERT)
+ abort();
+#else
DAWN_BREAKPOINT();
+#endif
}
diff --git a/chromium/third_party/dawn/src/common/BUILD.gn b/chromium/third_party/dawn/src/common/BUILD.gn
index 585876d821a..321fa3097d6 100644
--- a/chromium/third_party/dawn/src/common/BUILD.gn
+++ b/chromium/third_party/dawn/src/common/BUILD.gn
@@ -25,6 +25,12 @@ if (build_with_chromium) {
dcheck_always_on = false
}
+if (build_with_chromium) {
+ import("//build/config/sanitizers/sanitizers.gni")
+} else {
+ use_fuzzing_engine = false
+}
+
###############################################################################
# Common dawn configs
###############################################################################
@@ -43,10 +49,16 @@ config("dawn_internal") {
]
defines = []
- if (dawn_always_assert || dcheck_always_on || is_debug) {
+ if (dawn_always_assert || dcheck_always_on || is_debug ||
+ use_fuzzing_engine) {
defines += [ "DAWN_ENABLE_ASSERTS" ]
}
+ if (use_fuzzing_engine) {
+ # Does a hard abort when an assertion fails so that fuzzers catch and parse the failure.
+ defines += [ "DAWN_ABORT_ON_ASSERT" ]
+ }
+
if (dawn_enable_d3d12) {
defines += [ "DAWN_ENABLE_BACKEND_D3D12" ]
}
@@ -63,10 +75,14 @@ config("dawn_internal") {
defines += [ "DAWN_ENABLE_BACKEND_VULKAN" ]
}
- if (is_linux && !is_chromeos) {
+ if (dawn_use_x11) {
defines += [ "DAWN_USE_X11" ]
}
+ if (dawn_enable_error_injection) {
+ defines += [ "DAWN_ENABLE_ERROR_INJECTION" ]
+ }
+
# Only internal Dawn targets can use this config, this means only targets in
# this BUILD.gn file.
visibility = [ ":*" ]
@@ -89,9 +105,15 @@ if (is_win || is_linux || is_mac || is_fuchsia || is_android) {
"Constants.h",
"DynamicLib.cpp",
"DynamicLib.h",
+ "GPUInfo.cpp",
+ "GPUInfo.h",
"HashUtils.h",
+ "LinkedList.h",
+ "Log.cpp",
+ "Log.h",
"Math.cpp",
"Math.h",
+ "PlacementAllocated.h",
"Platform.h",
"Result.cpp",
"Result.h",
@@ -99,6 +121,8 @@ if (is_win || is_linux || is_mac || is_fuchsia || is_android) {
"SerialMap.h",
"SerialQueue.h",
"SerialStorage.h",
+ "SlabAllocator.cpp",
+ "SlabAllocator.h",
"SwapChainUtils.h",
"SystemUtils.cpp",
"SystemUtils.h",
@@ -116,5 +140,8 @@ if (is_win || is_linux || is_mac || is_fuchsia || is_android) {
"../../third_party:vulkan_headers",
]
}
+ if (is_android) {
+ libs = [ "log" ]
+ }
}
}
diff --git a/chromium/third_party/dawn/src/common/CMakeLists.txt b/chromium/third_party/dawn/src/common/CMakeLists.txt
new file mode 100644
index 00000000000..be9bd84b964
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/CMakeLists.txt
@@ -0,0 +1,52 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_library(dawn_common STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawn_common PRIVATE
+ "Assert.cpp"
+ "Assert.h"
+ "BitSetIterator.h"
+ "Compiler.h"
+ "Constants.h"
+ "DynamicLib.cpp"
+ "DynamicLib.h"
+ "GPUInfo.cpp"
+ "GPUInfo.h"
+ "HashUtils.h"
+ "LinkedList.h"
+ "Log.cpp"
+ "Log.h"
+ "Math.cpp"
+ "Math.h"
+ "PlacementAllocated.h"
+ "Platform.h"
+ "Result.cpp"
+ "Result.h"
+ "Serial.h"
+ "SerialMap.h"
+ "SerialQueue.h"
+ "SerialStorage.h"
+ "SlabAllocator.cpp"
+ "SlabAllocator.h"
+ "SwapChainUtils.h"
+ "SystemUtils.cpp"
+ "SystemUtils.h"
+ "vulkan_platform.h"
+ "windows_with_undefs.h"
+ "xlib_with_undefs.h"
+)
+target_link_libraries(dawn_common PRIVATE dawn_internal_config)
+
+# TODO Android Log support
+# TODO Vulkan headers support
diff --git a/chromium/third_party/dawn/src/common/Compiler.h b/chromium/third_party/dawn/src/common/Compiler.h
index 3bbcee6a188..8e425c90588 100644
--- a/chromium/third_party/dawn/src/common/Compiler.h
+++ b/chromium/third_party/dawn/src/common/Compiler.h
@@ -61,6 +61,9 @@
# endif
# define DAWN_DECLARE_UNUSED __attribute__((unused))
+# if defined(NDEBUG)
+# define DAWN_FORCE_INLINE inline __attribute__((always_inline))
+# endif
// MSVC
#elif defined(_MSC_VER)
@@ -77,6 +80,9 @@ extern void __cdecl __debugbreak(void);
# endif
# define DAWN_DECLARE_UNUSED
+# if defined(NDEBUG)
+# define DAWN_FORCE_INLINE __forceinline
+# endif
#else
# error "Unsupported compiler"
@@ -97,5 +103,8 @@ extern void __cdecl __debugbreak(void);
#if !defined(DAWN_NO_DISCARD)
# define DAWN_NO_DISCARD
#endif
+#if !defined(DAWN_FORCE_INLINE)
+# define DAWN_FORCE_INLINE inline
+#endif
#endif // COMMON_COMPILER_H_
diff --git a/chromium/third_party/dawn/src/common/Constants.h b/chromium/third_party/dawn/src/common/Constants.h
index fee8a4f68d0..c157c066148 100644
--- a/chromium/third_party/dawn/src/common/Constants.h
+++ b/chromium/third_party/dawn/src/common/Constants.h
@@ -51,13 +51,6 @@ static constexpr uint64_t kDrawIndexedIndirectSize = 5 * sizeof(uint32_t);
static constexpr float kLodMin = 0.0;
static constexpr float kLodMax = 1000.0;
-static constexpr uint32_t kVendorID_AMD = 0x1002;
-static constexpr uint32_t kVendorID_ARM = 0x13B5;
-static constexpr uint32_t kVendorID_ImgTec = 0x1010;
-static constexpr uint32_t kVendorID_Intel = 0x8086;
-static constexpr uint32_t kVendorID_Nvidia = 0x10DE;
-static constexpr uint32_t kVendorID_Qualcomm = 0x5143;
-
// Max texture size constants
static constexpr uint32_t kMaxTextureSize = 8192u;
static constexpr uint32_t kMaxTexture2DArrayLayers = 256u;
diff --git a/chromium/third_party/dawn/src/common/GPUInfo.cpp b/chromium/third_party/dawn/src/common/GPUInfo.cpp
new file mode 100644
index 00000000000..5d80fde75cd
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/GPUInfo.cpp
@@ -0,0 +1,36 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "common/GPUInfo.h"
+
+namespace gpu_info {
+ bool IsAMD(PCIVendorID vendorId) {
+ return vendorId == kVendorID_AMD;
+ }
+ bool IsARM(PCIVendorID vendorId) {
+ return vendorId == kVendorID_ARM;
+ }
+ bool IsImgTec(PCIVendorID vendorId) {
+ return vendorId == kVendorID_ImgTec;
+ }
+ bool IsIntel(PCIVendorID vendorId) {
+ return vendorId == kVendorID_Intel;
+ }
+ bool IsNvidia(PCIVendorID vendorId) {
+ return vendorId == kVendorID_Nvidia;
+ }
+ bool IsQualcomm(PCIVendorID vendorId) {
+ return vendorId == kVendorID_Qualcomm;
+ }
+} // namespace gpu_info
diff --git a/chromium/third_party/dawn/src/common/GPUInfo.h b/chromium/third_party/dawn/src/common/GPUInfo.h
new file mode 100644
index 00000000000..29d6bb9b861
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/GPUInfo.h
@@ -0,0 +1,39 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_GPUINFO_H
+#define COMMON_GPUINFO_H
+
+#include <cstdint>
+
+using PCIVendorID = uint32_t;
+
+namespace gpu_info {
+
+ static constexpr PCIVendorID kVendorID_AMD = 0x1002;
+ static constexpr PCIVendorID kVendorID_ARM = 0x13B5;
+ static constexpr PCIVendorID kVendorID_ImgTec = 0x1010;
+ static constexpr PCIVendorID kVendorID_Intel = 0x8086;
+ static constexpr PCIVendorID kVendorID_Nvidia = 0x10DE;
+ static constexpr PCIVendorID kVendorID_Qualcomm = 0x5143;
+
+ bool IsAMD(PCIVendorID vendorId);
+ bool IsARM(PCIVendorID vendorId);
+ bool IsImgTec(PCIVendorID vendorId);
+ bool IsIntel(PCIVendorID vendorId);
+ bool IsNvidia(PCIVendorID vendorId);
+ bool IsQualcomm(PCIVendorID vendorId);
+
+} // namespace gpu_info
+#endif // COMMON_GPUINFO_H
\ No newline at end of file
diff --git a/chromium/third_party/dawn/src/common/LinkedList.h b/chromium/third_party/dawn/src/common/LinkedList.h
new file mode 100644
index 00000000000..7c0a4139662
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/LinkedList.h
@@ -0,0 +1,193 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is a copy of Chromium's /src/base/containers/linked_list.h
+
+#ifndef COMMON_LINKED_LIST_H
+#define COMMON_LINKED_LIST_H
+
+#include "common/Assert.h"
+
+// Simple LinkedList type. (See the Q&A section to understand how this
+// differs from std::list).
+//
+// To use, start by declaring the class which will be contained in the linked
+// list, as extending LinkNode (this gives it next/previous pointers).
+//
+// class MyNodeType : public LinkNode<MyNodeType> {
+// ...
+// };
+//
+// Next, to keep track of the list's head/tail, use a LinkedList instance:
+//
+// LinkedList<MyNodeType> list;
+//
+// To add elements to the list, use any of LinkedList::Append,
+// LinkNode::InsertBefore, or LinkNode::InsertAfter:
+//
+// LinkNode<MyNodeType>* n1 = ...;
+// LinkNode<MyNodeType>* n2 = ...;
+// LinkNode<MyNodeType>* n3 = ...;
+//
+// list.Append(n1);
+// list.Append(n3);
+//   n2->InsertBefore(n3);
+//
+// Lastly, to iterate through the linked list forwards:
+//
+// for (LinkNode<MyNodeType>* node = list.head();
+// node != list.end();
+// node = node->next()) {
+// MyNodeType* value = node->value();
+// ...
+// }
+//
+// Or to iterate the linked list backwards:
+//
+// for (LinkNode<MyNodeType>* node = list.tail();
+// node != list.end();
+// node = node->previous()) {
+// MyNodeType* value = node->value();
+// ...
+// }
+//
+// Questions and Answers:
+//
+// Q. Should I use std::list or base::LinkedList?
+//
+// A. The main reason to use base::LinkedList over std::list is
+// performance. If you don't care about the performance differences
+// then use an STL container, as it makes for better code readability.
+//
+// Comparing the performance of base::LinkedList<T> to std::list<T*>:
+//
+// * Erasing an element of type T* from base::LinkedList<T> is
+// an O(1) operation. Whereas for std::list<T*> it is O(n).
+// That is because with std::list<T*> you must obtain an
+// iterator to the T* element before you can call erase(iterator).
+//
+// * Insertion operations with base::LinkedList<T> never require
+// heap allocations.
+//
+// Q. How does base::LinkedList implementation differ from std::list?
+//
+// A. Doubly-linked lists are made up of nodes that contain "next" and
+// "previous" pointers that reference other nodes in the list.
+//
+// With base::LinkedList<T>, the type being inserted already reserves
+// space for the "next" and "previous" pointers (base::LinkNode<T>*).
+// Whereas with std::list<T> the type can be anything, so the implementation
+// needs to glue on the "next" and "previous" pointers using
+// some internal node type.
+
+template <typename T>
+class LinkNode {
+ public:
+ LinkNode() : previous_(nullptr), next_(nullptr) {
+ }
+ LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {
+ }
+
+ LinkNode(LinkNode<T>&& rhs) {
+ next_ = rhs.next_;
+ rhs.next_ = nullptr;
+ previous_ = rhs.previous_;
+ rhs.previous_ = nullptr;
+
+ // If the node belongs to a list, next_ and previous_ are both non-null.
+ // Otherwise, they are both null.
+ if (next_) {
+ next_->previous_ = this;
+ previous_->next_ = this;
+ }
+ }
+
+ // Insert |this| into the linked list, before |e|.
+ void InsertBefore(LinkNode<T>* e) {
+ this->next_ = e;
+ this->previous_ = e->previous_;
+ e->previous_->next_ = this;
+ e->previous_ = this;
+ }
+
+ // Insert |this| into the linked list, after |e|.
+ void InsertAfter(LinkNode<T>* e) {
+ this->next_ = e->next_;
+ this->previous_ = e;
+ e->next_->previous_ = this;
+ e->next_ = this;
+ }
+
+ // Check if |this| is in a list.
+ bool IsInList() const {
+ ASSERT((this->previous_ == nullptr) == (this->next_ == nullptr));
+ return this->next_ != nullptr;
+ }
+
+ // Remove |this| from the linked list.
+ void RemoveFromList() {
+ this->previous_->next_ = this->next_;
+ this->next_->previous_ = this->previous_;
+        // next() and previous() return non-null if and only if this node is in a
+        // list.
+ this->next_ = nullptr;
+ this->previous_ = nullptr;
+ }
+
+ LinkNode<T>* previous() const {
+ return previous_;
+ }
+
+ LinkNode<T>* next() const {
+ return next_;
+ }
+
+ // Cast from the node-type to the value type.
+ const T* value() const {
+ return static_cast<const T*>(this);
+ }
+
+ T* value() {
+ return static_cast<T*>(this);
+ }
+
+ private:
+ LinkNode<T>* previous_;
+ LinkNode<T>* next_;
+};
+
+template <typename T>
+class LinkedList {
+ public:
+ // The "root" node is self-referential, and forms the basis of a circular
+ // list (root_.next() will point back to the start of the list,
+    // and root_.previous() wraps around to the end of the list).
+ LinkedList() : root_(&root_, &root_) {
+ }
+
+ // Appends |e| to the end of the linked list.
+ void Append(LinkNode<T>* e) {
+ e->InsertBefore(&root_);
+ }
+
+ LinkNode<T>* head() const {
+ return root_.next();
+ }
+
+ LinkNode<T>* tail() const {
+ return root_.previous();
+ }
+
+ const LinkNode<T>* end() const {
+ return &root_;
+ }
+
+ bool empty() const {
+ return head() == end();
+ }
+
+ private:
+ LinkNode<T> root_;
+};
+#endif // COMMON_LINKED_LIST_H \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/common/Log.cpp b/chromium/third_party/dawn/src/common/Log.cpp
new file mode 100644
index 00000000000..04aeb08a951
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/Log.cpp
@@ -0,0 +1,116 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "common/Log.h"
+
+#include "common/Assert.h"
+#include "common/Platform.h"
+
+#include <cstdio>
+
+#if defined(DAWN_PLATFORM_ANDROID)
+# include <android/log.h>
+#endif
+
+namespace dawn {
+
+ namespace {
+
+ const char* SeverityName(LogSeverity severity) {
+ switch (severity) {
+ case LogSeverity::Debug:
+ return "Debug";
+ case LogSeverity::Info:
+ return "Info";
+ case LogSeverity::Warning:
+ return "Warning";
+ case LogSeverity::Error:
+ return "Error";
+ default:
+ UNREACHABLE();
+ return "";
+ }
+ }
+
+#if defined(DAWN_PLATFORM_ANDROID)
+ android_LogPriority AndroidLogPriority(LogSeverity severity) {
+ switch (severity) {
+ case LogSeverity::Debug:
+ return ANDROID_LOG_INFO;
+ case LogSeverity::Info:
+ return ANDROID_LOG_INFO;
+ case LogSeverity::Warning:
+ return ANDROID_LOG_WARN;
+ case LogSeverity::Error:
+ return ANDROID_LOG_ERROR;
+ default:
+ UNREACHABLE();
+ return ANDROID_LOG_ERROR;
+ }
+ }
+#endif // defined(DAWN_PLATFORM_ANDROID)
+
+ } // anonymous namespace
+
+ LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {
+ }
+
+ LogMessage::~LogMessage() {
+ std::string fullMessage = mStream.str();
+
+ // If this message has been moved, its stream is empty.
+ if (fullMessage.empty()) {
+ return;
+ }
+
+ const char* severityName = SeverityName(mSeverity);
+
+ FILE* outputStream = stdout;
+ if (mSeverity == LogSeverity::Warning || mSeverity == LogSeverity::Error) {
+ outputStream = stderr;
+ }
+
+#if defined(DAWN_PLATFORM_ANDROID)
+ android_LogPriority androidPriority = AndroidLogPriority(mSeverity);
+ __android_log_print(androidPriority, "Dawn", "%s: %s\n", severityName, fullMessage.c_str());
+#else // defined(DAWN_PLATFORM_ANDROID)
+ // Note: we use fprintf because <iostream> includes static initializers.
+ fprintf(outputStream, "%s: %s\n", severityName, fullMessage.c_str());
+ fflush(outputStream);
+#endif // defined(DAWN_PLATFORM_ANDROID)
+ }
+
+ LogMessage DebugLog() {
+ return {LogSeverity::Debug};
+ }
+
+ LogMessage InfoLog() {
+ return {LogSeverity::Info};
+ }
+
+ LogMessage WarningLog() {
+ return {LogSeverity::Warning};
+ }
+
+ LogMessage ErrorLog() {
+ return {LogSeverity::Error};
+ }
+
+ LogMessage DebugLog(const char* file, const char* function, int line) {
+ LogMessage message = DebugLog();
+ message << file << ":" << line << "(" << function << ")";
+ return message;
+ }
+
+} // namespace dawn
diff --git a/chromium/third_party/dawn/src/common/Log.h b/chromium/third_party/dawn/src/common/Log.h
new file mode 100644
index 00000000000..0504af61ed3
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/Log.h
@@ -0,0 +1,95 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_LOG_H_
+#define COMMON_LOG_H_
+
+// Dawn targets shouldn't use iostream or printf directly for several reasons:
+// - iostream adds static initializers which we want to avoid.
+// - printf and iostream don't show up in logcat on Android so printf debugging doesn't work but
+// log-message debugging does.
+// - log severity helps provide intent compared to a printf.
+//
+// Logging should in general be avoided: errors should go through the regular WebGPU error reporting
+// mechanism and other forms of logging should (TODO: eventually) go through the logging dependency
+// injection, so for example they show up in Chromium's about:gpu page. Nonetheless there are some
+// cases where logging is necessary and when this file was first introduced we needed to replace all
+// uses of iostream so we could see them in Android's logcat.
+//
+// Regular logging is done using the [Debug|Info|Warning|Error]Log() function this way:
+//
+// InfoLog() << things << that << ostringstream << supports; // No need for a std::endl or "\n"
+//
+// It creates a LogMessage object that isn't stored anywhere and gets its destructor called
+// immediately which outputs the stored ostringstream in the right place.
+//
+// This file also contains DAWN_DEBUG for "printf debugging" which works on Android and
+// additionally outputs the file, line and function name. Use it this way:
+//
+// // Pepper this throughout code to get a log of the execution
+// DAWN_DEBUG();
+//
+// // Get more information
+// DAWN_DEBUG() << texture.GetFormat();
+
+#include <sstream>
+
+namespace dawn {
+
+ // Log levels mostly used to signal intent where the log message is produced and used to route
+ // the message to the correct output.
+ enum class LogSeverity {
+ Debug,
+ Info,
+ Warning,
+ Error,
+ };
+
+ // Essentially an ostringstream that will print itself in its destructor.
+ class LogMessage {
+ public:
+ LogMessage(LogSeverity severity);
+ ~LogMessage();
+
+ LogMessage(LogMessage&& other) = default;
+ LogMessage& operator=(LogMessage&& other) = default;
+
+ template <typename T>
+ LogMessage& operator<<(T&& value) {
+ mStream << value;
+ return *this;
+ }
+
+ private:
+ LogMessage(const LogMessage& other) = delete;
+ LogMessage& operator=(const LogMessage& other) = delete;
+
+ LogSeverity mSeverity;
+ std::ostringstream mStream;
+ };
+
+ // Short-hands to create a LogMessage with the respective severity.
+ LogMessage DebugLog();
+ LogMessage InfoLog();
+ LogMessage WarningLog();
+ LogMessage ErrorLog();
+
+ // DAWN_DEBUG is a helper macro that creates a DebugLog and outputs file/line/function
+ // information
+ LogMessage DebugLog(const char* file, const char* function, int line);
+#define DAWN_DEBUG() ::dawn::DebugLog(__FILE__, __func__, __LINE__)
+
+} // namespace dawn
+
+#endif // COMMON_LOG_H_
diff --git a/chromium/third_party/dawn/src/common/Math.cpp b/chromium/third_party/dawn/src/common/Math.cpp
index a8823e54293..4471eb77aa7 100644
--- a/chromium/third_party/dawn/src/common/Math.cpp
+++ b/chromium/third_party/dawn/src/common/Math.cpp
@@ -15,6 +15,7 @@
#include "common/Math.h"
#include "common/Assert.h"
+#include "common/Platform.h"
#include <algorithm>
#include <cmath>
@@ -50,28 +51,31 @@ uint32_t Log2(uint32_t value) {
uint32_t Log2(uint64_t value) {
ASSERT(value != 0);
#if defined(DAWN_COMPILER_MSVC)
+# if defined(DAWN_PLATFORM_64_BIT)
unsigned long firstBitIndex = 0ul;
unsigned char ret = _BitScanReverse64(&firstBitIndex, value);
ASSERT(ret != 0);
return firstBitIndex;
-#else
+# else // defined(DAWN_PLATFORM_64_BIT)
+ unsigned long firstBitIndex = 0ul;
+ if (_BitScanReverse(&firstBitIndex, value >> 32)) {
+ return firstBitIndex + 32;
+ }
+ unsigned char ret = _BitScanReverse(&firstBitIndex, value & 0xFFFFFFFF);
+ ASSERT(ret != 0);
+ return firstBitIndex;
+# endif // defined(DAWN_PLATFORM_64_BIT)
+#else // defined(DAWN_COMPILER_MSVC)
return 63 - static_cast<uint32_t>(__builtin_clzll(value));
-#endif
+#endif // defined(DAWN_COMPILER_MSVC)
}
uint64_t NextPowerOfTwo(uint64_t n) {
-#if defined(DAWN_COMPILER_MSVC)
if (n <= 1) {
return 1;
}
- unsigned long firstBitIndex = 0ul;
- unsigned char ret = _BitScanReverse64(&firstBitIndex, n - 1);
- ASSERT(ret != 0);
- return 1ull << (firstBitIndex + 1);
-#else
- return n <= 1 ? 1 : 1ull << (64 - __builtin_clzll(n - 1));
-#endif
+ return 1ull << (Log2(n - 1) + 1);
}
bool IsPowerOfTwo(uint64_t n) {
@@ -85,13 +89,6 @@ bool IsPtrAligned(const void* ptr, size_t alignment) {
return (reinterpret_cast<size_t>(ptr) & (alignment - 1)) == 0;
}
-void* AlignVoidPtr(void* ptr, size_t alignment) {
- ASSERT(IsPowerOfTwo(alignment));
- ASSERT(alignment != 0);
- return reinterpret_cast<void*>((reinterpret_cast<size_t>(ptr) + (alignment - 1)) &
- ~(alignment - 1));
-}
-
bool IsAligned(uint32_t value, size_t alignment) {
ASSERT(alignment <= UINT32_MAX);
ASSERT(IsPowerOfTwo(alignment));
diff --git a/chromium/third_party/dawn/src/common/Math.h b/chromium/third_party/dawn/src/common/Math.h
index ac40dd96724..5ee915ef73e 100644
--- a/chromium/third_party/dawn/src/common/Math.h
+++ b/chromium/third_party/dawn/src/common/Math.h
@@ -15,6 +15,8 @@
#ifndef COMMON_MATH_H_
#define COMMON_MATH_H_
+#include "common/Assert.h"
+
#include <cstddef>
#include <cstdint>
#include <cstring>
@@ -35,13 +37,19 @@ bool IsAligned(uint32_t value, size_t alignment);
uint32_t Align(uint32_t value, size_t alignment);
template <typename T>
-T* AlignPtr(T* ptr, size_t alignment) {
- return static_cast<T*>(AlignVoidPtr(ptr, alignment));
+DAWN_FORCE_INLINE T* AlignPtr(T* ptr, size_t alignment) {
+ ASSERT(IsPowerOfTwo(alignment));
+ ASSERT(alignment != 0);
+ return reinterpret_cast<T*>((reinterpret_cast<size_t>(ptr) + (alignment - 1)) &
+ ~(alignment - 1));
}
template <typename T>
-const T* AlignPtr(const T* ptr, size_t alignment) {
- return static_cast<const T*>(AlignVoidPtr(const_cast<T*>(ptr), alignment));
+DAWN_FORCE_INLINE const T* AlignPtr(const T* ptr, size_t alignment) {
+ ASSERT(IsPowerOfTwo(alignment));
+ ASSERT(alignment != 0);
+ return reinterpret_cast<const T*>((reinterpret_cast<size_t>(ptr) + (alignment - 1)) &
+ ~(alignment - 1));
}
template <typename destType, typename sourceType>
diff --git a/chromium/third_party/dawn/src/common/PlacementAllocated.h b/chromium/third_party/dawn/src/common/PlacementAllocated.h
new file mode 100644
index 00000000000..6bb329c3d5c
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/PlacementAllocated.h
@@ -0,0 +1,37 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_PLACEMENTALLOCATED_H_
+#define COMMON_PLACEMENTALLOCATED_H_
+
+#include <cstddef>
+
+class PlacementAllocated {
+ public:
+ // Delete the default new operator so this can only be created with placement new.
+ void* operator new(size_t) = delete;
+
+ void* operator new(size_t size, void* ptr) {
+ // Pass through the pointer of the allocation. This is essentially the default
+ // placement-new implementation, but we must define it if we delete the default
+ // new operator.
+ return ptr;
+ }
+
+ void operator delete(void* ptr) {
+ // Object is placement-allocated. Don't free the memory.
+ }
+};
+
+#endif // COMMON_PLACEMENTALLOCATED_H_
diff --git a/chromium/third_party/dawn/src/common/Result.h b/chromium/third_party/dawn/src/common/Result.h
index 3e33052fb35..7dc5e2df9ee 100644
--- a/chromium/third_party/dawn/src/common/Result.h
+++ b/chromium/third_party/dawn/src/common/Result.h
@@ -20,6 +20,7 @@
#include <cstddef>
#include <cstdint>
+#include <memory>
#include <type_traits>
#include <utility>
@@ -38,10 +39,10 @@
template <typename T, typename E>
class Result;
-// The interface of Result<T, E> shoud look like the following.
+// The interface of Result<T, E> should look like the following.
// public:
// Result(T&& success);
-// Result(E&& error);
+// Result(std::unique_ptr<E> error);
//
// Result(Result<T, E>&& other);
// Result<T, E>& operator=(Result<T, E>&& other);
@@ -52,18 +53,18 @@ class Result;
// bool IsSuccess() const;
//
// T&& AcquireSuccess();
-// E&& AcquireError();
+// std::unique_ptr<E> AcquireError();
// Specialization of Result for returning errors only via pointers. It is basically a pointer
// where nullptr is both Success and Empty.
template <typename E>
-class DAWN_NO_DISCARD Result<void, E*> {
+class DAWN_NO_DISCARD Result<void, E> {
public:
Result();
- Result(E* error);
+ Result(std::unique_ptr<E> error);
- Result(Result<void, E*>&& other);
- Result<void, E*>& operator=(Result<void, E>&& other);
+ Result(Result<void, E>&& other);
+ Result<void, E>& operator=(Result<void, E>&& other);
~Result();
@@ -71,10 +72,10 @@ class DAWN_NO_DISCARD Result<void, E*> {
bool IsSuccess() const;
void AcquireSuccess();
- E* AcquireError();
+ std::unique_ptr<E> AcquireError();
private:
- E* mError = nullptr;
+ std::unique_ptr<E> mError;
};
// Uses SFINAE to try to get alignof(T) but fallback to Default if T isn't defined.
@@ -108,7 +109,7 @@ namespace detail {
} // namespace detail
template <typename T, typename E>
-class DAWN_NO_DISCARD Result<T*, E*> {
+class DAWN_NO_DISCARD Result<T*, E> {
public:
static_assert(alignof_if_defined_else_default<T, 4> >= 4,
"Result<T*, E*> reserves two bits for tagging pointers");
@@ -116,13 +117,13 @@ class DAWN_NO_DISCARD Result<T*, E*> {
"Result<T*, E*> reserves two bits for tagging pointers");
Result(T* success);
- Result(E* error);
+ Result(std::unique_ptr<E> error);
// Support returning a Result<T*, E*> from a Result<TChild*, E*>
template <typename TChild>
- Result(Result<TChild*, E*>&& other);
+ Result(Result<TChild*, E>&& other);
template <typename TChild>
- Result<T*, E*>& operator=(Result<TChild*, E>&& other);
+ Result<T*, E>& operator=(Result<TChild*, E>&& other);
~Result();
@@ -130,7 +131,7 @@ class DAWN_NO_DISCARD Result<T*, E*> {
bool IsSuccess() const;
T* AcquireSuccess();
- E* AcquireError();
+ std::unique_ptr<E> AcquireError();
private:
template <typename T2, typename E2>
@@ -140,7 +141,7 @@ class DAWN_NO_DISCARD Result<T*, E*> {
};
template <typename T, typename E>
-class DAWN_NO_DISCARD Result<const T*, E*> {
+class DAWN_NO_DISCARD Result<const T*, E> {
public:
static_assert(alignof_if_defined_else_default<T, 4> >= 4,
"Result<T*, E*> reserves two bits for tagging pointers");
@@ -148,10 +149,10 @@ class DAWN_NO_DISCARD Result<const T*, E*> {
"Result<T*, E*> reserves two bits for tagging pointers");
Result(const T* success);
- Result(E* error);
+ Result(std::unique_ptr<E> error);
- Result(Result<const T*, E*>&& other);
- Result<const T*, E*>& operator=(Result<const T*, E>&& other);
+ Result(Result<const T*, E>&& other);
+ Result<const T*, E>& operator=(Result<const T*, E>&& other);
~Result();
@@ -159,7 +160,7 @@ class DAWN_NO_DISCARD Result<const T*, E*> {
bool IsSuccess() const;
const T* AcquireSuccess();
- E* AcquireError();
+ std::unique_ptr<E> AcquireError();
private:
intptr_t mPayload = detail::kEmptyPayload;
@@ -172,7 +173,7 @@ template <typename T, typename E>
class DAWN_NO_DISCARD Result {
public:
Result(T&& success);
- Result(E&& error);
+ Result(std::unique_ptr<E> error);
Result(Result<T, E>&& other);
Result<T, E>& operator=(Result<T, E>&& other);
@@ -183,7 +184,7 @@ class DAWN_NO_DISCARD Result {
bool IsSuccess() const;
T&& AcquireSuccess();
- E&& AcquireError();
+ std::unique_ptr<E> AcquireError();
private:
enum PayloadType {
@@ -193,56 +194,52 @@ class DAWN_NO_DISCARD Result {
};
PayloadType mType;
- E mError;
+ std::unique_ptr<E> mError;
T mSuccess;
};
-// Implementation of Result<void, E*>
+// Implementation of Result<void, E>
template <typename E>
-Result<void, E*>::Result() {
+Result<void, E>::Result() {
}
template <typename E>
-Result<void, E*>::Result(E* error) : mError(error) {
+Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {
}
template <typename E>
-Result<void, E*>::Result(Result<void, E*>&& other) : mError(other.mError) {
- other.mError = nullptr;
+Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {
}
template <typename E>
-Result<void, E*>& Result<void, E*>::operator=(Result<void, E>&& other) {
+Result<void, E>& Result<void, E>::operator=(Result<void, E>&& other) {
ASSERT(mError == nullptr);
- mError = other.mError;
- other.mError = nullptr;
+ mError = std::move(other.mError);
return *this;
}
template <typename E>
-Result<void, E*>::~Result() {
+Result<void, E>::~Result() {
ASSERT(mError == nullptr);
}
template <typename E>
-bool Result<void, E*>::IsError() const {
+bool Result<void, E>::IsError() const {
return mError != nullptr;
}
template <typename E>
-bool Result<void, E*>::IsSuccess() const {
+bool Result<void, E>::IsSuccess() const {
return mError == nullptr;
}
template <typename E>
-void Result<void, E*>::AcquireSuccess() {
+void Result<void, E>::AcquireSuccess() {
}
template <typename E>
-E* Result<void, E*>::AcquireError() {
- E* error = mError;
- mError = nullptr;
- return error;
+std::unique_ptr<E> Result<void, E>::AcquireError() {
+ return std::move(mError);
}
// Implementation details of the tagged pointer Results
@@ -262,25 +259,26 @@ namespace detail {
} // namespace detail
-// Implementation of Result<T*, E*>
+// Implementation of Result<T*, E>
template <typename T, typename E>
-Result<T*, E*>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {
+Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {
}
template <typename T, typename E>
-Result<T*, E*>::Result(E* error) : mPayload(detail::MakePayload(error, detail::Error)) {
+Result<T*, E>::Result(std::unique_ptr<E> error)
+ : mPayload(detail::MakePayload(error.release(), detail::Error)) {
}
template <typename T, typename E>
template <typename TChild>
-Result<T*, E*>::Result(Result<TChild*, E*>&& other) : mPayload(other.mPayload) {
+Result<T*, E>::Result(Result<TChild*, E>&& other) : mPayload(other.mPayload) {
other.mPayload = detail::kEmptyPayload;
static_assert(std::is_same<T, TChild>::value || std::is_base_of<T, TChild>::value, "");
}
template <typename T, typename E>
template <typename TChild>
-Result<T*, E*>& Result<T*, E*>::operator=(Result<TChild*, E>&& other) {
+Result<T*, E>& Result<T*, E>::operator=(Result<TChild*, E>&& other) {
ASSERT(mPayload == detail::kEmptyPayload);
static_assert(std::is_same<T, TChild>::value || std::is_base_of<T, TChild>::value, "");
mPayload = other.mPayload;
@@ -289,51 +287,52 @@ Result<T*, E*>& Result<T*, E*>::operator=(Result<TChild*, E>&& other) {
}
template <typename T, typename E>
-Result<T*, E*>::~Result() {
+Result<T*, E>::~Result() {
ASSERT(mPayload == detail::kEmptyPayload);
}
template <typename T, typename E>
-bool Result<T*, E*>::IsError() const {
+bool Result<T*, E>::IsError() const {
return detail::GetPayloadType(mPayload) == detail::Error;
}
template <typename T, typename E>
-bool Result<T*, E*>::IsSuccess() const {
+bool Result<T*, E>::IsSuccess() const {
return detail::GetPayloadType(mPayload) == detail::Success;
}
template <typename T, typename E>
-T* Result<T*, E*>::AcquireSuccess() {
+T* Result<T*, E>::AcquireSuccess() {
T* success = detail::GetSuccessFromPayload<T>(mPayload);
mPayload = detail::kEmptyPayload;
return success;
}
template <typename T, typename E>
-E* Result<T*, E*>::AcquireError() {
- E* error = detail::GetErrorFromPayload<E>(mPayload);
+std::unique_ptr<E> Result<T*, E>::AcquireError() {
+ std::unique_ptr<E> error(detail::GetErrorFromPayload<E>(mPayload));
mPayload = detail::kEmptyPayload;
- return error;
+ return std::move(error);
}
// Implementation of Result<const T*, E*>
template <typename T, typename E>
-Result<const T*, E*>::Result(const T* success)
+Result<const T*, E>::Result(const T* success)
: mPayload(detail::MakePayload(success, detail::Success)) {
}
template <typename T, typename E>
-Result<const T*, E*>::Result(E* error) : mPayload(detail::MakePayload(error, detail::Error)) {
+Result<const T*, E>::Result(std::unique_ptr<E> error)
+ : mPayload(detail::MakePayload(error.release(), detail::Error)) {
}
template <typename T, typename E>
-Result<const T*, E*>::Result(Result<const T*, E*>&& other) : mPayload(other.mPayload) {
+Result<const T*, E>::Result(Result<const T*, E>&& other) : mPayload(other.mPayload) {
other.mPayload = detail::kEmptyPayload;
}
template <typename T, typename E>
-Result<const T*, E*>& Result<const T*, E*>::operator=(Result<const T*, E>&& other) {
+Result<const T*, E>& Result<const T*, E>::operator=(Result<const T*, E>&& other) {
ASSERT(mPayload == detail::kEmptyPayload);
mPayload = other.mPayload;
other.mPayload = detail::kEmptyPayload;
@@ -341,32 +340,32 @@ Result<const T*, E*>& Result<const T*, E*>::operator=(Result<const T*, E>&& othe
}
template <typename T, typename E>
-Result<const T*, E*>::~Result() {
+Result<const T*, E>::~Result() {
ASSERT(mPayload == detail::kEmptyPayload);
}
template <typename T, typename E>
-bool Result<const T*, E*>::IsError() const {
+bool Result<const T*, E>::IsError() const {
return detail::GetPayloadType(mPayload) == detail::Error;
}
template <typename T, typename E>
-bool Result<const T*, E*>::IsSuccess() const {
+bool Result<const T*, E>::IsSuccess() const {
return detail::GetPayloadType(mPayload) == detail::Success;
}
template <typename T, typename E>
-const T* Result<const T*, E*>::AcquireSuccess() {
+const T* Result<const T*, E>::AcquireSuccess() {
T* success = detail::GetSuccessFromPayload<T>(mPayload);
mPayload = detail::kEmptyPayload;
return success;
}
template <typename T, typename E>
-E* Result<const T*, E*>::AcquireError() {
- E* error = detail::GetErrorFromPayload<E>(mPayload);
+std::unique_ptr<E> Result<const T*, E>::AcquireError() {
+ std::unique_ptr<E> error(detail::GetErrorFromPayload<E>(mPayload));
mPayload = detail::kEmptyPayload;
- return error;
+ return std::move(error);
}
// Implementation of Result<T, E>
@@ -375,7 +374,7 @@ Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success))
}
template <typename T, typename E>
-Result<T, E>::Result(E&& error) : mType(Error), mError(std::move(error)) {
+Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {
}
template <typename T, typename E>
@@ -415,7 +414,7 @@ T&& Result<T, E>::AcquireSuccess() {
}
template <typename T, typename E>
-E&& Result<T, E>::AcquireError() {
+std::unique_ptr<E> Result<T, E>::AcquireError() {
ASSERT(mType == Error);
mType = Acquired;
return std::move(mError);
diff --git a/chromium/third_party/dawn/src/common/SlabAllocator.cpp b/chromium/third_party/dawn/src/common/SlabAllocator.cpp
new file mode 100644
index 00000000000..61948873ba8
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/SlabAllocator.cpp
@@ -0,0 +1,249 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "common/SlabAllocator.h"
+
+#include "common/Assert.h"
+#include "common/Math.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <limits>
+#include <new>
+
+// IndexLinkNode
+
+SlabAllocatorImpl::IndexLinkNode::IndexLinkNode(Index index, Index nextIndex)
+ : index(index), nextIndex(nextIndex) {
+}
+
+// Slab
+
+SlabAllocatorImpl::Slab::Slab(std::unique_ptr<char[]> allocation, IndexLinkNode* head)
+ : allocation(std::move(allocation)),
+ freeList(head),
+ prev(nullptr),
+ next(nullptr),
+ blocksInUse(0) {
+}
+
+SlabAllocatorImpl::Slab::Slab(Slab&& rhs) = default;
+
+SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {
+}
+
+SlabAllocatorImpl::SentinelSlab::SentinelSlab(SentinelSlab&& rhs) = default;
+
+SlabAllocatorImpl::SentinelSlab::~SentinelSlab() {
+ Slab* slab = this->next;
+ while (slab != nullptr) {
+ Slab* next = slab->next;
+ ASSERT(slab->blocksInUse == 0);
+ slab->~Slab();
+ slab = next;
+ }
+}
+
+// SlabAllocatorImpl
+
+SlabAllocatorImpl::Index SlabAllocatorImpl::kInvalidIndex =
+ std::numeric_limits<SlabAllocatorImpl::Index>::max();
+
+SlabAllocatorImpl::SlabAllocatorImpl(Index blocksPerSlab,
+ uint32_t objectSize,
+ uint32_t objectAlignment)
+ : mAllocationAlignment(std::max(static_cast<uint32_t>(alignof(Slab)), objectAlignment)),
+ mSlabBlocksOffset(Align(sizeof(Slab), objectAlignment)),
+ mIndexLinkNodeOffset(Align(objectSize, alignof(IndexLinkNode))),
+ mBlockStride(Align(mIndexLinkNodeOffset + sizeof(IndexLinkNode), objectAlignment)),
+ mBlocksPerSlab(blocksPerSlab),
+ mTotalAllocationSize(
+ // required allocation size
+ static_cast<size_t>(mSlabBlocksOffset) + mBlocksPerSlab * mBlockStride +
+ // Pad the allocation size by mAllocationAlignment so that the aligned allocation still
+ // fulfills the required size.
+ mAllocationAlignment) {
+ ASSERT(IsPowerOfTwo(mAllocationAlignment));
+}
+
+SlabAllocatorImpl::SlabAllocatorImpl(SlabAllocatorImpl&& rhs)
+ : mAllocationAlignment(rhs.mAllocationAlignment),
+ mSlabBlocksOffset(rhs.mSlabBlocksOffset),
+ mIndexLinkNodeOffset(rhs.mIndexLinkNodeOffset),
+ mBlockStride(rhs.mBlockStride),
+ mBlocksPerSlab(rhs.mBlocksPerSlab),
+ mTotalAllocationSize(rhs.mTotalAllocationSize),
+ mAvailableSlabs(std::move(rhs.mAvailableSlabs)),
+ mFullSlabs(std::move(rhs.mFullSlabs)),
+ mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {
+}
+
+SlabAllocatorImpl::~SlabAllocatorImpl() = default;
+
+SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::OffsetFrom(
+ IndexLinkNode* node,
+ std::make_signed_t<Index> offset) const {
+ return reinterpret_cast<IndexLinkNode*>(reinterpret_cast<char*>(node) +
+ static_cast<intptr_t>(mBlockStride) * offset);
+}
+
+SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::NodeFromObject(void* object) const {
+ return reinterpret_cast<SlabAllocatorImpl::IndexLinkNode*>(static_cast<char*>(object) +
+ mIndexLinkNodeOffset);
+}
+
+void* SlabAllocatorImpl::ObjectFromNode(IndexLinkNode* node) const {
+ return static_cast<void*>(reinterpret_cast<char*>(node) - mIndexLinkNodeOffset);
+}
+
+bool SlabAllocatorImpl::IsNodeInSlab(Slab* slab, IndexLinkNode* node) const {
+ char* firstObjectPtr = reinterpret_cast<char*>(slab) + mSlabBlocksOffset;
+ IndexLinkNode* firstNode = NodeFromObject(firstObjectPtr);
+ IndexLinkNode* lastNode = OffsetFrom(firstNode, mBlocksPerSlab - 1);
+ return node >= firstNode && node <= lastNode && node->index < mBlocksPerSlab;
+}
+
+void SlabAllocatorImpl::PushFront(Slab* slab, IndexLinkNode* node) const {
+ ASSERT(IsNodeInSlab(slab, node));
+
+ IndexLinkNode* head = slab->freeList;
+ if (head == nullptr) {
+ node->nextIndex = kInvalidIndex;
+ } else {
+ ASSERT(IsNodeInSlab(slab, head));
+ node->nextIndex = head->index;
+ }
+ slab->freeList = node;
+
+ ASSERT(slab->blocksInUse != 0);
+ slab->blocksInUse--;
+}
+
+SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::PopFront(Slab* slab) const {
+ ASSERT(slab->freeList != nullptr);
+
+ IndexLinkNode* head = slab->freeList;
+ if (head->nextIndex == kInvalidIndex) {
+ slab->freeList = nullptr;
+ } else {
+ ASSERT(IsNodeInSlab(slab, head));
+ slab->freeList = OffsetFrom(head, head->nextIndex - head->index);
+ ASSERT(IsNodeInSlab(slab, slab->freeList));
+ }
+
+ ASSERT(slab->blocksInUse < mBlocksPerSlab);
+ slab->blocksInUse++;
+ return head;
+}
+
+void SlabAllocatorImpl::SentinelSlab::Prepend(SlabAllocatorImpl::Slab* slab) {
+ if (this->next != nullptr) {
+ this->next->prev = slab;
+ }
+ slab->prev = this;
+ slab->next = this->next;
+ this->next = slab;
+}
+
+void SlabAllocatorImpl::Slab::Splice() {
+ SlabAllocatorImpl::Slab* originalPrev = this->prev;
+ SlabAllocatorImpl::Slab* originalNext = this->next;
+
+ this->prev = nullptr;
+ this->next = nullptr;
+
+ ASSERT(originalPrev != nullptr);
+
+ // Set the originalNext's prev pointer.
+ if (originalNext != nullptr) {
+ originalNext->prev = originalPrev;
+ }
+
+ // Now, set the originalNext as the originalPrev's new next.
+ originalPrev->next = originalNext;
+}
+
+void* SlabAllocatorImpl::Allocate() {
+ if (mAvailableSlabs.next == nullptr) {
+ GetNewSlab();
+ }
+
+ Slab* slab = mAvailableSlabs.next;
+ IndexLinkNode* node = PopFront(slab);
+ ASSERT(node != nullptr);
+
+ // Move full slabs to a separate list, so allocate can always return quickly.
+ if (slab->blocksInUse == mBlocksPerSlab) {
+ slab->Splice();
+ mFullSlabs.Prepend(slab);
+ }
+
+ return ObjectFromNode(node);
+}
+
+void SlabAllocatorImpl::Deallocate(void* ptr) {
+ IndexLinkNode* node = NodeFromObject(ptr);
+
+ ASSERT(node->index < mBlocksPerSlab);
+ void* firstAllocation = ObjectFromNode(OffsetFrom(node, -node->index));
+ Slab* slab = reinterpret_cast<Slab*>(static_cast<char*>(firstAllocation) - mSlabBlocksOffset);
+ ASSERT(slab != nullptr);
+
+ bool slabWasFull = slab->blocksInUse == mBlocksPerSlab;
+
+ ASSERT(slab->blocksInUse != 0);
+ PushFront(slab, node);
+
+ if (slabWasFull) {
+ // Slab is in the full list. Move it to the recycled list.
+ ASSERT(slab->freeList != nullptr);
+ slab->Splice();
+ mRecycledSlabs.Prepend(slab);
+ }
+
+ // TODO(enga): Occasionally prune slabs if |blocksInUse == 0|.
+ // Doing so eagerly hurts performance.
+}
+
+void SlabAllocatorImpl::GetNewSlab() {
+ // Should only be called when there are no available slabs.
+ ASSERT(mAvailableSlabs.next == nullptr);
+
+ if (mRecycledSlabs.next != nullptr) {
+ // If the recycled list is non-empty, swap their contents.
+ std::swap(mAvailableSlabs.next, mRecycledSlabs.next);
+
+ // We swapped the next pointers, so the prev pointer is wrong.
+ // Update it here.
+ mAvailableSlabs.next->prev = &mAvailableSlabs;
+ ASSERT(mRecycledSlabs.next == nullptr);
+ return;
+ }
+
+ // TODO(enga): Use aligned_alloc with C++17.
+ auto allocation = std::unique_ptr<char[]>(new char[mTotalAllocationSize]);
+ char* alignedPtr = AlignPtr(allocation.get(), mAllocationAlignment);
+
+ char* dataStart = alignedPtr + mSlabBlocksOffset;
+
+ IndexLinkNode* node = NodeFromObject(dataStart);
+ for (uint32_t i = 0; i < mBlocksPerSlab; ++i) {
+ new (OffsetFrom(node, i)) IndexLinkNode(i, i + 1);
+ }
+
+ IndexLinkNode* lastNode = OffsetFrom(node, mBlocksPerSlab - 1);
+ lastNode->nextIndex = kInvalidIndex;
+
+ mAvailableSlabs.Prepend(new (alignedPtr) Slab(std::move(allocation), node));
+}
diff --git a/chromium/third_party/dawn/src/common/SlabAllocator.h b/chromium/third_party/dawn/src/common/SlabAllocator.h
new file mode 100644
index 00000000000..939f1c029d1
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/SlabAllocator.h
@@ -0,0 +1,184 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_SLABALLOCATOR_H_
+#define COMMON_SLABALLOCATOR_H_
+
+#include "common/PlacementAllocated.h"
+
+#include <cstdint>
+#include <memory>
+#include <type_traits>
+
+// The SlabAllocator allocates objects out of one or more fixed-size contiguous "slabs" of memory.
+// This makes it very quick to allocate and deallocate fixed-size objects because the allocator only
+// needs to index an offset into pre-allocated memory. It is similar to a pool-allocator that
+// recycles memory from previous allocations, except multiple allocations are hosted contiguously in
+// one large slab.
+//
+// Internally, the SlabAllocator stores slabs as a linked list to avoid extra indirections indexing
+// into an std::vector. To service an allocation request, the allocator only needs to know the first
+// currently available slab. There are three backing linked lists: AVAILABLE, FULL, and RECYCLED.
+// A slab that is AVAILABLE can be used to immediately service allocation requests. Once it has no
+// remaining space, it is moved to the FULL state. When a FULL slab sees any deallocations, it is
+// moved to the RECYCLED state. The RECYCLED state is separate from the AVAILABLE state so that
+// deallocations don't immediately prepend slabs to the AVAILABLE list, and change the current slab
+// servicing allocations. When the AVAILABLE list becomes empty it is swapped with the RECYCLED
+// list.
+//
+// Allocated objects are placement-allocated with some extra info at the end (we'll call the Object
+// plus the extra bytes a "block") used to specify the constant index of the block in its parent
+// slab, as well as the index of the next available block. So, following the block next-indices
+// forms a linked list of free blocks.
+//
+// Slab creation: When a new slab is allocated, sufficient memory is allocated for it, and then the
+// slab metadata plus all of its child blocks are placement-allocated into the memory. Indices and
+// next-indices are initialized to form the free-list of blocks.
+//
+// Allocation: When an object is allocated, if there is no space available in an existing slab, a
+// new slab is created (or an old slab is recycled). The first block of the slab is removed and
+// returned.
+//
+// Deallocation: When an object is deallocated, it can compute the pointer to its parent slab
+// because it stores the index of its own allocation. That block is then prepended to the slab's
+// free list.
+class SlabAllocatorImpl {
+ public:
+ // Allocations host their current index and the index of the next free block.
+ // Because this is an index, and not a byte offset, it can be much smaller than a size_t.
+ // TODO(enga): Is uint8_t sufficient?
+ using Index = uint16_t;
+
+ SlabAllocatorImpl(SlabAllocatorImpl&& rhs);
+
+ protected:
+ // This is essentially a singly linked list using indices instead of pointers,
+ // so we store the index of "this" in |this->index|.
+ struct IndexLinkNode : PlacementAllocated {
+ IndexLinkNode(Index index, Index nextIndex);
+
+ const Index index; // The index of this block in the slab.
+ Index nextIndex; // The index of the next available block. kInvalidIndex, if none.
+ };
+
+ struct Slab : PlacementAllocated {
+ // A slab is placement-allocated into an aligned pointer from a separate allocation.
+ // Ownership of the allocation is transferred to the slab on creation.
+ // | ---------- allocation --------- |
+ // | pad | Slab | data ------------> |
+ Slab(std::unique_ptr<char[]> allocation, IndexLinkNode* head);
+ Slab(Slab&& rhs);
+
+ void Splice();
+
+ std::unique_ptr<char[]> allocation;
+ IndexLinkNode* freeList;
+ Slab* prev;
+ Slab* next;
+ Index blocksInUse;
+ };
+
+ SlabAllocatorImpl(Index blocksPerSlab, uint32_t objectSize, uint32_t objectAlignment);
+ ~SlabAllocatorImpl();
+
+ // Allocate a new block of memory.
+ void* Allocate();
+
+ // Deallocate a block of memory.
+ void Deallocate(void* ptr);
+
+ private:
+ // The maximum value is reserved to indicate the end of the list.
+ static Index kInvalidIndex;
+
+ // Get the IndexLinkNode |offset| slots away.
+ IndexLinkNode* OffsetFrom(IndexLinkNode* node, std::make_signed_t<Index> offset) const;
+
+ // Compute the pointer to the IndexLinkNode from an allocated object.
+ IndexLinkNode* NodeFromObject(void* object) const;
+
+ // Compute the pointer to the object from an IndexLinkNode.
+ void* ObjectFromNode(IndexLinkNode* node) const;
+
+ bool IsNodeInSlab(Slab* slab, IndexLinkNode* node) const;
+
+ // The Slab stores a linked-list of free allocations.
+ // PushFront/PopFront adds/removes an allocation from the free list.
+ void PushFront(Slab* slab, IndexLinkNode* node) const;
+ IndexLinkNode* PopFront(Slab* slab) const;
+
+ // Replace the current slab with a new one, and chain the old one off of it.
+    // Both slabs may still be used for allocation/deallocation, but older slabs
+ // will be a little slower to get allocations from.
+ void GetNewSlab();
+
+ const uint32_t mAllocationAlignment;
+
+ // | Slab | pad | Obj | pad | Node | pad | Obj | pad | Node | pad | ....
+ // | -----------| mSlabBlocksOffset
+ // | | ---------------------- | mBlockStride
+ // | | ----------| mIndexLinkNodeOffset
+ // | --------------------------------------> (mSlabBlocksOffset + mBlocksPerSlab * mBlockStride)
+
+ // A Slab is metadata, followed by the aligned memory to allocate out of. |mSlabBlocksOffset| is
+ // the offset to the start of the aligned memory region.
+ const uint32_t mSlabBlocksOffset;
+
+ // The IndexLinkNode is stored after the Allocation itself. This is the offset to it.
+ const uint32_t mIndexLinkNodeOffset;
+
+ // Because alignment of allocations may introduce padding, |mBlockStride| is the
+ // distance between aligned blocks of (Allocation + IndexLinkNode)
+ const uint32_t mBlockStride;
+
+ const Index mBlocksPerSlab; // The total number of blocks in a slab.
+
+ const size_t mTotalAllocationSize;
+
+ struct SentinelSlab : Slab {
+ SentinelSlab();
+ ~SentinelSlab();
+
+ SentinelSlab(SentinelSlab&& rhs);
+
+ void Prepend(Slab* slab);
+ };
+
+ SentinelSlab mAvailableSlabs; // Available slabs to service allocations.
+ SentinelSlab mFullSlabs; // Full slabs. Stored here so we can skip checking them.
+ SentinelSlab mRecycledSlabs; // Recycled slabs. Not immediately added to |mAvailableSlabs| so
+ // we don't thrash the current "active" slab.
+};
+
+template <typename T>
+class SlabAllocator : public SlabAllocatorImpl {
+ public:
+ SlabAllocator(size_t totalObjectBytes,
+ uint32_t objectSize = sizeof(T),
+ uint32_t objectAlignment = alignof(T))
+ : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {
+ }
+
+ template <typename... Args>
+ T* Allocate(Args&&... args) {
+ void* ptr = SlabAllocatorImpl::Allocate();
+ return new (ptr) T(std::forward<Args>(args)...);
+ }
+
+ void Deallocate(T* object) {
+ SlabAllocatorImpl::Deallocate(object);
+ }
+};
+
+#endif // COMMON_SLABALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/common/SystemUtils.cpp b/chromium/third_party/dawn/src/common/SystemUtils.cpp
index 88fc7d7faa9..73aa4ee640d 100644
--- a/chromium/third_party/dawn/src/common/SystemUtils.cpp
+++ b/chromium/third_party/dawn/src/common/SystemUtils.cpp
@@ -21,7 +21,7 @@
# include <limits.h>
# include <unistd.h>
# include <cstdlib>
-#elif defined(DAWN_PLATFORM_MACOS)
+#elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
# include <mach-o/dyld.h>
# include <vector>
#endif
@@ -88,7 +88,7 @@ std::string GetExecutablePath() {
path[result] = '\0';
return path.data();
}
-#elif defined(DAWN_PLATFORM_MACOS)
+#elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
std::string GetExecutablePath() {
uint32_t size = 0;
_NSGetExecutablePath(nullptr, &size);
diff --git a/chromium/third_party/dawn/src/common/vulkan_platform.h b/chromium/third_party/dawn/src/common/vulkan_platform.h
index 0011a3103c1..113e831f432 100644
--- a/chromium/third_party/dawn/src/common/vulkan_platform.h
+++ b/chromium/third_party/dawn/src/common/vulkan_platform.h
@@ -18,6 +18,9 @@
#if !defined(DAWN_ENABLE_BACKEND_VULKAN)
# error "vulkan_platform.h included without the Vulkan backend enabled"
#endif
+#if defined(VULKAN_CORE_H_)
+# error "vulkan.h included before vulkan_platform.h"
+#endif
#include "common/Platform.h"
@@ -33,10 +36,9 @@
// (like vulkan.h on 64 bit) but makes sure the types are different on 32 bit architectures.
#if defined(DAWN_PLATFORM_64_BIT)
-# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) \
- using object##Native = struct object##_T*;
+# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*;
#elif defined(DAWN_PLATFORM_32_BIT)
-# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object##Native = uint64_t;
+# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t;
#else
# error "Unsupported platform"
#endif
@@ -53,117 +55,135 @@ DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(VkSomeHandle)
// One way to get the alignment inside structures of a type is to look at the alignment of it
// wrapped in a structure. Hence VkSameHandleNativeWrappe
-template <typename T>
-struct WrapperStruct {
- T member;
-};
-
-template <typename T>
-static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>);
-
-static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandleNative>;
-static constexpr size_t kUint64Alignment = AlignOfInStruct<VkSomeHandleNative>;
-
-// Simple handle types that supports "nullptr_t" as a 0 value.
-template <typename Tag, typename HandleType>
-class alignas(kNativeVkHandleAlignment) VkNonDispatchableHandle {
- public:
- // Default constructor and assigning of VK_NULL_HANDLE
- VkNonDispatchableHandle() = default;
- VkNonDispatchableHandle(std::nullptr_t) : mHandle(0) {
+namespace dawn_native { namespace vulkan {
+
+ namespace detail {
+ template <typename T>
+ struct WrapperStruct {
+ T member;
+ };
+
+ template <typename T>
+ static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>);
+
+ static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandle>;
+ static constexpr size_t kUint64Alignment = AlignOfInStruct<uint64_t>;
+
+ // Simple handle types that supports "nullptr_t" as a 0 value.
+ template <typename Tag, typename HandleType>
+ class alignas(detail::kNativeVkHandleAlignment) VkHandle {
+ public:
+ // Default constructor and assigning of VK_NULL_HANDLE
+ VkHandle() = default;
+ VkHandle(std::nullptr_t) {
+ }
+
+ // Use default copy constructor/assignment
+ VkHandle(const VkHandle<Tag, HandleType>& other) = default;
+ VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default;
+
+ // Comparisons between handles
+ bool operator==(VkHandle<Tag, HandleType> other) const {
+ return mHandle == other.mHandle;
+ }
+ bool operator!=(VkHandle<Tag, HandleType> other) const {
+ return mHandle != other.mHandle;
+ }
+
+ // Comparisons between handles and VK_NULL_HANDLE
+ bool operator==(std::nullptr_t) const {
+ return mHandle == 0;
+ }
+ bool operator!=(std::nullptr_t) const {
+ return mHandle != 0;
+ }
+
+ // Implicit conversion to real Vulkan types.
+ operator HandleType() const {
+ return GetHandle();
+ }
+
+ HandleType GetHandle() const {
+ return mHandle;
+ }
+
+ HandleType& operator*() {
+ return mHandle;
+ }
+
+ static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
+ return VkHandle{handle};
+ }
+
+ private:
+ explicit VkHandle(HandleType handle) : mHandle(handle) {
+ }
+
+ HandleType mHandle = 0;
+ };
+ } // namespace detail
+
+ static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
+
+ template <typename Tag, typename HandleType>
+ HandleType* AsVkArray(detail::VkHandle<Tag, HandleType>* handle) {
+ return reinterpret_cast<HandleType*>(handle);
}
- // Use default copy constructor/assignment
- VkNonDispatchableHandle(const VkNonDispatchableHandle<Tag, HandleType>& other) = default;
- VkNonDispatchableHandle& operator=(const VkNonDispatchableHandle<Tag, HandleType>&) = default;
+}} // namespace dawn_native::vulkan
+
+#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \
+ DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) \
+ namespace dawn_native { namespace vulkan { \
+ using object = detail::VkHandle<struct VkTag##object, ::object>; \
+ static_assert(sizeof(object) == sizeof(uint64_t), ""); \
+ static_assert(alignof(object) == detail::kUint64Alignment, ""); \
+ static_assert(sizeof(object) == sizeof(::object), ""); \
+ static_assert(alignof(object) == detail::kNativeVkHandleAlignment, ""); \
+ } \
+ } // namespace dawn_native::vulkan
+
+// Import additional parts of Vulkan that are supported on our architecture and preemptively include
+// headers that vulkan.h includes that we have "undefs" for.
+#if defined(DAWN_PLATFORM_WINDOWS)
+# define VK_USE_PLATFORM_WIN32_KHR
+# include "common/windows_with_undefs.h"
+#endif // DAWN_PLATFORM_WINDOWS
- // Comparisons between handles
- bool operator==(VkNonDispatchableHandle<Tag, HandleType> other) {
- return mHandle == other.mHandle;
- }
- bool operator!=(VkNonDispatchableHandle<Tag, HandleType> other) {
- return mHandle != other.mHandle;
- }
+#if defined(DAWN_USE_X11)
+# define VK_USE_PLATFORM_XLIB_KHR
+# include "common/xlib_with_undefs.h"
+#endif // defined(DAWN_USE_X11)
- // Comparisons between handles and VK_NULL_HANDLE
- bool operator==(std::nullptr_t) {
- return mHandle == 0;
- }
- bool operator!=(std::nullptr_t) {
- return mHandle != 0;
- }
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+# define VK_USE_PLATFORM_METAL_EXT
+#endif // defined(DAWN_ENABLE_BACKEND_METAL)
- // The regular Vulkan handle type depends on the pointer width but is always 64 bits wide.
- // - On 64bit it is an opaque pointer type, probably to help with type safety
- // - On 32bit it is a uint64_t because pointers aren't wide enough (and non dispatchable
- // handles can be optimized to not be pointer but contain GPU virtual addresses or the
- // data in a packed form).
- // Because of this we need two types of conversions from our handle type: to uint64_t and to
- // the "native" Vulkan type that may not be an uint64_t
+#if defined(DAWN_PLATFORM_ANDROID)
+# define VK_USE_PLATFORM_ANDROID_KHR
+#endif // defined(DAWN_PLATFORM_ANDROID)
- static VkNonDispatchableHandle<Tag, HandleType> CreateFromU64(uint64_t handle) {
- return {handle};
- }
+#if defined(DAWN_PLATFORM_FUCHSIA)
+# define VK_USE_PLATFORM_FUCHSIA
+#endif // defined(DAWN_PLATFORM_FUCHSIA)
- uint64_t GetU64() const {
- return mHandle;
- }
+// The actual inclusion of vulkan.h!
+#define VK_NO_PROTOTYPES
+#include <vulkan/vulkan.h>
+// Redefine VK_NULL_HANDLE for better type safety where possible.
+#undef VK_NULL_HANDLE
#if defined(DAWN_PLATFORM_64_BIT)
- static VkNonDispatchableHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
- return CreateFromU64(static_cast<uint64_t>(reinterpret_cast<intptr_t>(handle)));
- }
-
- HandleType GetHandle() const {
- return mHandle;
- }
+static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
#elif defined(DAWN_PLATFORM_32_BIT)
- static VkNonDispatchableHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
- return {handle};
- }
-
- HandleType GetHandle() const {
- return mHandle;
- }
+static constexpr uint64_t VK_NULL_HANDLE = 0;
#else
# error "Unsupported platform"
#endif
- private:
- VkNonDispatchableHandle(uint64_t handle) : mHandle(handle) {
- }
-
- uint64_t mHandle = 0;
-};
-
-#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \
- struct VkTag##object; \
- DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) \
- using object = VkNonDispatchableHandle<VkTag##object, object##Native>; \
- static_assert(sizeof(object) == sizeof(uint64_t), ""); \
- static_assert(alignof(object) == kUint64Alignment, ""); \
- static_assert(sizeof(object) == sizeof(object##Native), ""); \
- static_assert(alignof(object) == kNativeVkHandleAlignment, "");
-
-# include <vulkan/vulkan.h>
-
- // VK_NULL_HANDLE is defined to 0 but we don't want our handle type to compare to arbitrary
- // integers. Redefine VK_NULL_HANDLE to nullptr that has its own type.
-# undef VK_NULL_HANDLE
-# define VK_NULL_HANDLE nullptr
-
-// Remove windows.h macros after vulkan_platform's include of windows.h
-#if defined(DAWN_PLATFORM_WINDOWS)
-# include "common/windows_with_undefs.h"
-#endif
-// Remove X11/Xlib.h macros after vulkan_platform's include of it.
-#if defined(DAWN_USE_X11)
-# include "common/xlib_with_undefs.h"
-#endif
-
// Include Fuchsia-specific definitions that are not upstreamed yet.
#if defined(DAWN_PLATFORM_FUCHSIA)
# include <vulkan/vulkan_fuchsia_extras.h>
-#endif
+#endif // defined(DAWN_PLATFORM_FUCHSIA)
#endif // COMMON_VULKANPLATFORM_H_
diff --git a/chromium/third_party/dawn/src/common/windows_with_undefs.h b/chromium/third_party/dawn/src/common/windows_with_undefs.h
index e19552f30a8..381116a0243 100644
--- a/chromium/third_party/dawn/src/common/windows_with_undefs.h
+++ b/chromium/third_party/dawn/src/common/windows_with_undefs.h
@@ -26,6 +26,7 @@
#include <windows.h>
// Macros defined for ANSI / Unicode support
+#undef CreateWindow
#undef GetMessage
// Macros defined to produce compiler intrinsics
diff --git a/chromium/third_party/dawn/src/common/xlib_with_undefs.h b/chromium/third_party/dawn/src/common/xlib_with_undefs.h
index 794ce0fb494..f82a19aa2d3 100644
--- a/chromium/third_party/dawn/src/common/xlib_with_undefs.h
+++ b/chromium/third_party/dawn/src/common/xlib_with_undefs.h
@@ -30,4 +30,6 @@
#undef None
#undef Always
+using XErrorHandler = int (*)(Display*, XErrorEvent*);
+
#endif // COMMON_XLIB_WITH_UNDEFS_H_
diff --git a/chromium/third_party/dawn/src/dawn/BUILD.gn b/chromium/third_party/dawn/src/dawn/BUILD.gn
index 2ee0996b9f6..fdfa5e9b9cb 100644
--- a/chromium/third_party/dawn/src/dawn/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn/BUILD.gn
@@ -32,12 +32,19 @@ group("libdawn") {
dawn_json_generator("dawn_headers_gen") {
target = "dawn_headers"
outputs = [
- "src/include/dawn/dawn.h",
"src/include/dawn/dawn_proc_table.h",
"src/include/dawn/webgpu.h",
]
}
+dawn_json_generator("emscripten_bits_gen") {
+ target = "emscripten_bits"
+ outputs = [
+ "src/dawn/webgpu_struct_info.json",
+ "src/dawn/library_webgpu_enum_tables.js",
+ ]
+}
+
source_set("dawn_headers") {
all_dependent_configs = [ "${dawn_root}/src/common:dawn_public_include_dirs" ]
public_deps = [
@@ -55,7 +62,6 @@ source_set("dawn_headers") {
dawn_json_generator("dawncpp_headers_gen") {
target = "dawncpp_headers"
outputs = [
- "src/include/dawn/dawncpp.h",
"src/include/dawn/webgpu_cpp.h",
]
}
diff --git a/chromium/third_party/dawn/src/dawn/CMakeLists.txt b/chromium/third_party/dawn/src/dawn/CMakeLists.txt
new file mode 100644
index 00000000000..0517343216c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn/CMakeLists.txt
@@ -0,0 +1,85 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+###############################################################################
+# Dawn headers
+###############################################################################
+
+DawnJSONGenerator(
+ TARGET "dawn_headers"
+ PRINT_NAME "Dawn headers"
+ RESULT_VARIABLE "DAWN_HEADERS_GEN_SOURCES"
+)
+
+# Headers only INTERFACE library with generated headers don't work in CMake
+# because the GENERATED property is local to a directory. Instead we make a
+# STATIC library with a Dummy cpp file.
+#
+# INTERFACE libraries can only have INTERFACE sources so the sources get added
+# to the dependant's list of sources. If these dependents are in another
+# directory, they don't see the GENERATED property and fail to configure
+# because the file doesn't exist on disk.
+add_library(dawn_headers STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawn_headers PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn/dawn_wsi.h"
+ ${DAWN_HEADERS_GEN_SOURCES}
+)
+target_link_libraries(dawn_headers INTERFACE dawn_public_config)
+
+###############################################################################
+# Dawn C++ headers
+###############################################################################
+
+DawnJSONGenerator(
+ TARGET "dawncpp_headers"
+ PRINT_NAME "Dawn C++ headers"
+ RESULT_VARIABLE "DAWNCPP_HEADERS_GEN_SOURCES"
+)
+
+# This headers only library needs to be a STATIC library, see comment for
+# dawn_headers above.
+add_library(dawncpp_headers STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawncpp_headers PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn/EnumClassBitmasks.h"
+ ${DAWNCPP_HEADERS_GEN_SOURCES}
+)
+target_link_libraries(dawncpp_headers INTERFACE dawn_headers)
+
+###############################################################################
+# Dawn C++ wrapper
+###############################################################################
+
+DawnJSONGenerator(
+ TARGET "dawncpp"
+ PRINT_NAME "Dawn C++ wrapper"
+ RESULT_VARIABLE "DAWNCPP_GEN_SOURCES"
+)
+
+add_library(dawncpp STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawncpp PRIVATE ${DAWNCPP_GEN_SOURCES})
+target_link_libraries(dawncpp PUBLIC dawncpp_headers)
+
+###############################################################################
+# libdawn_proc
+###############################################################################
+
+DawnJSONGenerator(
+ TARGET "dawn_proc"
+ PRINT_NAME "Dawn C++ wrapper"
+ RESULT_VARIABLE "DAWNPROC_GEN_SOURCES"
+)
+
+add_library(dawn_proc STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawn_proc PRIVATE ${DAWNPROC_GEN_SOURCES})
+target_link_libraries(dawn_proc PUBLIC dawn_headers)
diff --git a/chromium/third_party/dawn/src/dawn_native/Adapter.cpp b/chromium/third_party/dawn/src/dawn_native/Adapter.cpp
index 02f09663998..aa3ede7e23e 100644
--- a/chromium/third_party/dawn/src/dawn_native/Adapter.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Adapter.cpp
@@ -18,16 +18,16 @@
namespace dawn_native {
- AdapterBase::AdapterBase(InstanceBase* instance, BackendType backend)
+ AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend)
: mInstance(instance), mBackend(backend) {
}
- BackendType AdapterBase::GetBackendType() const {
+ wgpu::BackendType AdapterBase::GetBackendType() const {
return mBackend;
}
- DeviceType AdapterBase::GetDeviceType() const {
- return mDeviceType;
+ wgpu::AdapterType AdapterBase::GetAdapterType() const {
+ return mAdapterType;
}
const PCIInfo& AdapterBase::GetPCIInfo() const {
diff --git a/chromium/third_party/dawn/src/dawn_native/Adapter.h b/chromium/third_party/dawn/src/dawn_native/Adapter.h
index 410a9a35a46..d0089349abd 100644
--- a/chromium/third_party/dawn/src/dawn_native/Adapter.h
+++ b/chromium/third_party/dawn/src/dawn_native/Adapter.h
@@ -19,6 +19,9 @@
#include "dawn_native/Error.h"
#include "dawn_native/Extensions.h"
+#include "dawn_native/dawn_platform.h"
+
+#include <string>
namespace dawn_native {
@@ -26,11 +29,11 @@ namespace dawn_native {
class AdapterBase {
public:
- AdapterBase(InstanceBase* instance, BackendType backend);
+ AdapterBase(InstanceBase* instance, wgpu::BackendType backend);
virtual ~AdapterBase() = default;
- BackendType GetBackendType() const;
- DeviceType GetDeviceType() const;
+ wgpu::BackendType GetBackendType() const;
+ wgpu::AdapterType GetAdapterType() const;
const PCIInfo& GetPCIInfo() const;
InstanceBase* GetInstance() const;
@@ -43,7 +46,7 @@ namespace dawn_native {
protected:
PCIInfo mPCIInfo = {};
- DeviceType mDeviceType = DeviceType::Unknown;
+ wgpu::AdapterType mAdapterType = wgpu::AdapterType::Unknown;
ExtensionsSet mSupportedExtensions;
private:
@@ -52,7 +55,7 @@ namespace dawn_native {
MaybeError CreateDeviceInternal(DeviceBase** result, const DeviceDescriptor* descriptor);
InstanceBase* mInstance = nullptr;
- BackendType mBackend;
+ wgpu::BackendType mBackend;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BackendConnection.cpp b/chromium/third_party/dawn/src/dawn_native/BackendConnection.cpp
index 5a9d4b169f1..09ef4ef76c7 100644
--- a/chromium/third_party/dawn/src/dawn_native/BackendConnection.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BackendConnection.cpp
@@ -16,11 +16,11 @@
namespace dawn_native {
- BackendConnection::BackendConnection(InstanceBase* instance, BackendType type)
+ BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
: mInstance(instance), mType(type) {
}
- BackendType BackendConnection::GetType() const {
+ wgpu::BackendType BackendConnection::GetType() const {
return mType;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/BackendConnection.h b/chromium/third_party/dawn/src/dawn_native/BackendConnection.h
index e0e56994eca..f17108ec585 100644
--- a/chromium/third_party/dawn/src/dawn_native/BackendConnection.h
+++ b/chromium/third_party/dawn/src/dawn_native/BackendConnection.h
@@ -26,10 +26,10 @@ namespace dawn_native {
// backend.
class BackendConnection {
public:
- BackendConnection(InstanceBase* instance, BackendType type);
+ BackendConnection(InstanceBase* instance, wgpu::BackendType type);
virtual ~BackendConnection() = default;
- BackendType GetType() const;
+ wgpu::BackendType GetType() const;
InstanceBase* GetInstance() const;
// Returns all the adapters for the system that can be created by the backend, without extra
@@ -42,7 +42,7 @@ namespace dawn_native {
private:
InstanceBase* mInstance = nullptr;
- BackendType mType;
+ wgpu::BackendType mType;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
index 9e85be14dae..1eae8f2c381 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
@@ -64,9 +64,7 @@ namespace dawn_native {
MaybeError ValidateTextureBinding(const DeviceBase* device,
const BindGroupBinding& binding,
wgpu::TextureUsage requiredUsage,
- bool multisampledBinding,
- wgpu::TextureComponentType requiredComponentType,
- wgpu::TextureViewDimension requiredDimension) {
+ const BindingInfo& bindingInfo) {
if (binding.textureView == nullptr || binding.sampler != nullptr ||
binding.buffer != nullptr) {
return DAWN_VALIDATION_ERROR("expected texture binding");
@@ -79,15 +77,29 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("texture binding usage mismatch");
}
- if (texture->IsMultisampledTexture() != multisampledBinding) {
+ if (texture->IsMultisampledTexture() != bindingInfo.multisampled) {
return DAWN_VALIDATION_ERROR("texture multisampling mismatch");
}
- if (!texture->GetFormat().HasComponentType(requiredComponentType)) {
- return DAWN_VALIDATION_ERROR("texture component type usage mismatch");
+ switch (requiredUsage) {
+ case wgpu::TextureUsage::Sampled: {
+ if (!texture->GetFormat().HasComponentType(bindingInfo.textureComponentType)) {
+ return DAWN_VALIDATION_ERROR("texture component type usage mismatch");
+ }
+ break;
+ }
+ case wgpu::TextureUsage::Storage: {
+ if (texture->GetFormat().format != bindingInfo.storageTextureFormat) {
+ return DAWN_VALIDATION_ERROR("storage texture format mismatch");
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
- if (binding.textureView->GetDimension() != requiredDimension) {
+ if (binding.textureView->GetDimension() != bindingInfo.textureDimension) {
return DAWN_VALIDATION_ERROR("texture view dimension mismatch");
}
@@ -114,35 +126,32 @@ namespace dawn_native {
}
DAWN_TRY(device->ValidateObject(descriptor->layout));
-
- const BindGroupLayoutBase::LayoutBindingInfo& layoutInfo =
- descriptor->layout->GetBindingInfo();
-
- if (descriptor->bindingCount != layoutInfo.mask.count()) {
+ if (descriptor->bindingCount != descriptor->layout->GetBindingCount()) {
return DAWN_VALIDATION_ERROR("numBindings mismatch");
}
+ const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
+
std::bitset<kMaxBindingsPerGroup> bindingsSet;
for (uint32_t i = 0; i < descriptor->bindingCount; ++i) {
const BindGroupBinding& binding = descriptor->bindings[i];
- uint32_t bindingIndex = binding.binding;
- // Check that we can set this binding.
- if (bindingIndex >= kMaxBindingsPerGroup) {
- return DAWN_VALIDATION_ERROR("binding index too high");
- }
-
- if (!layoutInfo.mask[bindingIndex]) {
+ const auto& it = bindingMap.find(BindingNumber(binding.binding));
+ if (it == bindingMap.end()) {
return DAWN_VALIDATION_ERROR("setting non-existent binding");
}
+ BindingIndex bindingIndex = it->second;
+ ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
if (bindingsSet[bindingIndex]) {
return DAWN_VALIDATION_ERROR("binding set twice");
}
bindingsSet.set(bindingIndex);
+ const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
+
// Perform binding-type specific validation.
- switch (layoutInfo.types[bindingIndex]) {
+ switch (bindingInfo.type) {
case wgpu::BindingType::UniformBuffer:
DAWN_TRY(ValidateBufferBinding(device, binding, wgpu::BufferUsage::Uniform));
break;
@@ -152,13 +161,18 @@ namespace dawn_native {
break;
case wgpu::BindingType::SampledTexture:
DAWN_TRY(ValidateTextureBinding(device, binding, wgpu::TextureUsage::Sampled,
- layoutInfo.multisampled[bindingIndex],
- layoutInfo.textureComponentTypes[bindingIndex],
- layoutInfo.textureDimensions[bindingIndex]));
+ bindingInfo));
break;
case wgpu::BindingType::Sampler:
DAWN_TRY(ValidateSamplerBinding(device, binding));
break;
+ // TODO(jiawei.shao@intel.com): support creating bind group with read-only and
+ // write-only storage textures.
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
+ DAWN_TRY(ValidateTextureBinding(device, binding, wgpu::TextureUsage::Storage,
+ bindingInfo));
+ break;
case wgpu::BindingType::StorageTexture:
UNREACHABLE();
break;
@@ -170,50 +184,70 @@ namespace dawn_native {
// - Each binding must be set at most once
//
// We don't validate the equality because it wouldn't be possible to cover it with a test.
- ASSERT(bindingsSet == layoutInfo.mask);
+ ASSERT(bindingsSet.count() == bindingMap.size());
return {};
- }
+ } // anonymous namespace
// BindGroup
- BindGroupBase::BindGroupBase(DeviceBase* device, const BindGroupDescriptor* descriptor)
- : ObjectBase(device), mLayout(descriptor->layout) {
+ BindGroupBase::BindGroupBase(DeviceBase* device,
+ const BindGroupDescriptor* descriptor,
+ void* bindingDataStart)
+ : ObjectBase(device),
+ mLayout(descriptor->layout),
+ mBindingData(mLayout->ComputeBindingDataPointers(bindingDataStart)) {
+ for (BindingIndex i = 0; i < mLayout->GetBindingCount(); ++i) {
+ // TODO(enga): Shouldn't be needed when bindings are tightly packed.
+ // This is to fill Ref<ObjectBase> holes with nullptrs.
+ new (&mBindingData.bindings[i]) Ref<ObjectBase>();
+ }
+
for (uint32_t i = 0; i < descriptor->bindingCount; ++i) {
const BindGroupBinding& binding = descriptor->bindings[i];
- uint32_t bindingIndex = binding.binding;
- ASSERT(bindingIndex < kMaxBindingsPerGroup);
+ BindingIndex bindingIndex =
+ descriptor->layout->GetBindingIndex(BindingNumber(binding.binding));
+ ASSERT(bindingIndex < mLayout->GetBindingCount());
// Only a single binding type should be set, so once we found it we can skip to the
// next loop iteration.
if (binding.buffer != nullptr) {
- ASSERT(mBindings[bindingIndex].Get() == nullptr);
- mBindings[bindingIndex] = binding.buffer;
- mOffsets[bindingIndex] = binding.offset;
+ ASSERT(mBindingData.bindings[bindingIndex].Get() == nullptr);
+ mBindingData.bindings[bindingIndex] = binding.buffer;
+ mBindingData.bufferData[bindingIndex].offset = binding.offset;
uint64_t bufferSize =
(binding.size == wgpu::kWholeSize) ? binding.buffer->GetSize() : binding.size;
- mSizes[bindingIndex] = bufferSize;
+ mBindingData.bufferData[bindingIndex].size = bufferSize;
continue;
}
if (binding.textureView != nullptr) {
- ASSERT(mBindings[bindingIndex].Get() == nullptr);
- mBindings[bindingIndex] = binding.textureView;
+ ASSERT(mBindingData.bindings[bindingIndex].Get() == nullptr);
+ mBindingData.bindings[bindingIndex] = binding.textureView;
continue;
}
if (binding.sampler != nullptr) {
- ASSERT(mBindings[bindingIndex].Get() == nullptr);
- mBindings[bindingIndex] = binding.sampler;
+ ASSERT(mBindingData.bindings[bindingIndex].Get() == nullptr);
+ mBindingData.bindings[bindingIndex] = binding.sampler;
continue;
}
}
}
+ BindGroupBase::~BindGroupBase() {
+ if (mLayout) {
+ ASSERT(!IsError());
+ for (BindingIndex i = 0; i < mLayout->GetBindingCount(); ++i) {
+ mBindingData.bindings[i].~Ref<ObjectBase>();
+ }
+ }
+ }
+
BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag) {
+ : ObjectBase(device, tag), mBindingData() {
}
// static
@@ -226,32 +260,30 @@ namespace dawn_native {
return mLayout.Get();
}
- BufferBinding BindGroupBase::GetBindingAsBufferBinding(size_t binding) {
+ BufferBinding BindGroupBase::GetBindingAsBufferBinding(BindingIndex bindingIndex) {
ASSERT(!IsError());
- ASSERT(binding < kMaxBindingsPerGroup);
- ASSERT(mLayout->GetBindingInfo().mask[binding]);
- ASSERT(mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::UniformBuffer ||
- mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::StorageBuffer ||
- mLayout->GetBindingInfo().types[binding] ==
+ ASSERT(bindingIndex < mLayout->GetBindingCount());
+ ASSERT(mLayout->GetBindingInfo(bindingIndex).type == wgpu::BindingType::UniformBuffer ||
+ mLayout->GetBindingInfo(bindingIndex).type == wgpu::BindingType::StorageBuffer ||
+ mLayout->GetBindingInfo(bindingIndex).type ==
wgpu::BindingType::ReadonlyStorageBuffer);
- BufferBase* buffer = static_cast<BufferBase*>(mBindings[binding].Get());
- return {buffer, mOffsets[binding], mSizes[binding]};
+ BufferBase* buffer = static_cast<BufferBase*>(mBindingData.bindings[bindingIndex].Get());
+ return {buffer, mBindingData.bufferData[bindingIndex].offset,
+ mBindingData.bufferData[bindingIndex].size};
}
- SamplerBase* BindGroupBase::GetBindingAsSampler(size_t binding) {
+ SamplerBase* BindGroupBase::GetBindingAsSampler(BindingIndex bindingIndex) {
ASSERT(!IsError());
- ASSERT(binding < kMaxBindingsPerGroup);
- ASSERT(mLayout->GetBindingInfo().mask[binding]);
- ASSERT(mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::Sampler);
- return static_cast<SamplerBase*>(mBindings[binding].Get());
+ ASSERT(bindingIndex < mLayout->GetBindingCount());
+ ASSERT(mLayout->GetBindingInfo(bindingIndex).type == wgpu::BindingType::Sampler);
+ return static_cast<SamplerBase*>(mBindingData.bindings[bindingIndex].Get());
}
- TextureViewBase* BindGroupBase::GetBindingAsTextureView(size_t binding) {
+ TextureViewBase* BindGroupBase::GetBindingAsTextureView(BindingIndex bindingIndex) {
ASSERT(!IsError());
- ASSERT(binding < kMaxBindingsPerGroup);
- ASSERT(mLayout->GetBindingInfo().mask[binding]);
- ASSERT(mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::SampledTexture);
- return static_cast<TextureViewBase*>(mBindings[binding].Get());
+ ASSERT(bindingIndex < mLayout->GetBindingCount());
+ ASSERT(mLayout->GetBindingInfo(bindingIndex).type == wgpu::BindingType::SampledTexture);
+ return static_cast<TextureViewBase*>(mBindingData.bindings[bindingIndex].Get());
}
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroup.h b/chromium/third_party/dawn/src/dawn_native/BindGroup.h
index fae804d1235..becb39ee753 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroup.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroup.h
@@ -16,6 +16,7 @@
#define DAWNNATIVE_BINDGROUP_H_
#include "common/Constants.h"
+#include "common/Math.h"
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/Error.h"
#include "dawn_native/Forward.h"
@@ -40,22 +41,40 @@ namespace dawn_native {
class BindGroupBase : public ObjectBase {
public:
- BindGroupBase(DeviceBase* device, const BindGroupDescriptor* descriptor);
+ ~BindGroupBase() override;
static BindGroupBase* MakeError(DeviceBase* device);
BindGroupLayoutBase* GetLayout();
- BufferBinding GetBindingAsBufferBinding(size_t binding);
- SamplerBase* GetBindingAsSampler(size_t binding);
- TextureViewBase* GetBindingAsTextureView(size_t binding);
+ BufferBinding GetBindingAsBufferBinding(BindingIndex bindingIndex);
+ SamplerBase* GetBindingAsSampler(BindingIndex bindingIndex);
+ TextureViewBase* GetBindingAsTextureView(BindingIndex bindingIndex);
+
+ protected:
+ // To save memory, the size of a bind group is dynamically determined and the bind group is
+ // placement-allocated into memory big enough to hold the bind group with its
+ // dynamically-sized bindings after it. The pointer of the memory of the beginning of the
+ // binding data should be passed as |bindingDataStart|.
+ BindGroupBase(DeviceBase* device,
+ const BindGroupDescriptor* descriptor,
+ void* bindingDataStart);
+
+ // Helper to instantiate BindGroupBase. We pass in |derived| because BindGroupBase may not
+ // be first in the allocation. The binding data is stored after the Derived class.
+ template <typename Derived>
+ BindGroupBase(Derived* derived, DeviceBase* device, const BindGroupDescriptor* descriptor)
+ : BindGroupBase(device,
+ descriptor,
+ AlignPtr(reinterpret_cast<char*>(derived) + sizeof(Derived),
+ descriptor->layout->GetBindingDataAlignment())) {
+ static_assert(std::is_base_of<BindGroupBase, Derived>::value, "");
+ }
private:
BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag);
Ref<BindGroupLayoutBase> mLayout;
- std::array<Ref<ObjectBase>, kMaxBindingsPerGroup> mBindings;
- std::array<uint32_t, kMaxBindingsPerGroup> mOffsets;
- std::array<uint32_t, kMaxBindingsPerGroup> mSizes;
+ BindGroupLayoutBase::BindingDataPointers mBindingData;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h b/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h
index 0c016e1be7f..3b0849fd8c5 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h
@@ -15,9 +15,8 @@
#ifndef DAWNNATIVE_BINDGROUPANDSTORAGEBARRIERTRACKER_H_
#define DAWNNATIVE_BINDGROUPANDSTORAGEBARRIERTRACKER_H_
-#include "dawn_native/BindGroupTracker.h"
-
#include "dawn_native/BindGroup.h"
+#include "dawn_native/BindGroupTracker.h"
namespace dawn_native {
@@ -39,15 +38,17 @@ namespace dawn_native {
mBuffersNeedingBarrier[index] = {};
const BindGroupLayoutBase* layout = bindGroup->GetLayout();
- const auto& info = layout->GetBindingInfo();
- for (uint32_t binding : IterateBitSet(info.mask)) {
- if ((info.visibilities[binding] & wgpu::ShaderStage::Compute) == 0) {
+ for (BindingIndex bindingIndex = 0; bindingIndex < layout->GetBindingCount();
+ ++bindingIndex) {
+ const BindingInfo& bindingInfo = layout->GetBindingInfo(bindingIndex);
+
+ if ((bindingInfo.visibility & wgpu::ShaderStage::Compute) == 0) {
continue;
}
- mBindingTypes[index][binding] = info.types[binding];
- switch (info.types[binding]) {
+ mBindingTypes[index][bindingIndex] = bindingInfo.type;
+ switch (bindingInfo.type) {
case wgpu::BindingType::UniformBuffer:
case wgpu::BindingType::ReadonlyStorageBuffer:
case wgpu::BindingType::Sampler:
@@ -56,12 +57,14 @@ namespace dawn_native {
break;
case wgpu::BindingType::StorageBuffer:
- mBuffersNeedingBarrier[index].set(binding);
- mBuffers[index][binding] =
- bindGroup->GetBindingAsBufferBinding(binding).buffer;
+ mBuffersNeedingBarrier[index].set(bindingIndex);
+ mBuffers[index][bindingIndex] =
+ bindGroup->GetBindingAsBufferBinding(bindingIndex).buffer;
break;
case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
// Not implemented.
default:
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
index 3d0ecd2d36b..b65f4692042 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
@@ -20,20 +20,91 @@
#include "dawn_native/ValidationUtils_autogen.h"
#include <functional>
+#include <set>
namespace dawn_native {
- MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase*,
+ MaybeError ValidateBindingTypeWithShaderStageVisibility(
+ wgpu::BindingType bindingType,
+ wgpu::ShaderStage shaderStageVisibility) {
+ // TODO(jiawei.shao@intel.com): support read-write storage textures.
+ switch (bindingType) {
+ case wgpu::BindingType::StorageBuffer: {
+ if ((shaderStageVisibility & wgpu::ShaderStage::Vertex) != 0) {
+ return DAWN_VALIDATION_ERROR(
+ "storage buffer binding is not supported in vertex shader");
+ }
+ break;
+ }
+
+ case wgpu::BindingType::WriteonlyStorageTexture: {
+ if ((shaderStageVisibility &
+ (wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment)) != 0) {
+ return DAWN_VALIDATION_ERROR(
+ "write-only storage texture binding is only supported in compute shader");
+ }
+ break;
+ }
+
+ case wgpu::BindingType::StorageTexture: {
+ return DAWN_VALIDATION_ERROR("Read-write storage texture binding is not supported");
+ }
+
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::SampledTexture:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ break;
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateStorageTextureFormat(DeviceBase* device,
+ wgpu::BindingType bindingType,
+ wgpu::TextureFormat storageTextureFormat) {
+ switch (bindingType) {
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture: {
+ DAWN_TRY(ValidateTextureFormat(storageTextureFormat));
+
+ const Format* format = nullptr;
+ DAWN_TRY_ASSIGN(format, device->GetInternalFormat(storageTextureFormat));
+ ASSERT(format != nullptr);
+ if (!format->supportsStorageUsage) {
+ return DAWN_VALIDATION_ERROR("The storage texture format is not supported");
+ }
+ break;
+ }
+
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::SampledTexture:
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
const BindGroupLayoutDescriptor* descriptor) {
if (descriptor->nextInChain != nullptr) {
return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
}
- std::bitset<kMaxBindingsPerGroup> bindingsSet;
+ std::set<BindingNumber> bindingsSet;
uint32_t dynamicUniformBufferCount = 0;
uint32_t dynamicStorageBufferCount = 0;
- for (uint32_t i = 0; i < descriptor->bindingCount; ++i) {
+ for (BindingIndex i = 0; i < descriptor->bindingCount; ++i) {
const BindGroupLayoutBinding& binding = descriptor->bindings[i];
+ BindingNumber bindingNumber = BindingNumber(binding.binding);
+
DAWN_TRY(ValidateShaderStage(binding.visibility));
DAWN_TRY(ValidateBindingType(binding.type));
DAWN_TRY(ValidateTextureComponentType(binding.textureComponentType));
@@ -42,13 +113,16 @@ namespace dawn_native {
DAWN_TRY(ValidateTextureViewDimension(binding.textureDimension));
}
- if (binding.binding >= kMaxBindingsPerGroup) {
- return DAWN_VALIDATION_ERROR("some binding index exceeds the maximum value");
- }
- if (bindingsSet[binding.binding]) {
+ if (bindingsSet.count(bindingNumber) != 0) {
return DAWN_VALIDATION_ERROR("some binding index was specified more than once");
}
+ DAWN_TRY(
+ ValidateBindingTypeWithShaderStageVisibility(binding.type, binding.visibility));
+
+ DAWN_TRY(
+ ValidateStorageTextureFormat(device, binding.type, binding.storageTextureFormat));
+
switch (binding.type) {
case wgpu::BindingType::UniformBuffer:
if (binding.hasDynamicOffset) {
@@ -63,6 +137,8 @@ namespace dawn_native {
break;
case wgpu::BindingType::SampledTexture:
case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
if (binding.hasDynamicOffset) {
return DAWN_VALIDATION_ERROR("Samplers and textures cannot be dynamic");
}
@@ -76,7 +152,11 @@ namespace dawn_native {
"BindGroupLayoutBinding::multisampled must be false (for now)");
}
- bindingsSet.set(binding.binding);
+ bindingsSet.insert(bindingNumber);
+ }
+
+ if (bindingsSet.size() > kMaxBindingsPerGroup) {
+ return DAWN_VALIDATION_ERROR("The number of bindings exceeds kMaxBindingsPerGroup.");
}
if (dynamicUniformBufferCount > kMaxDynamicUniformBufferCount) {
@@ -93,58 +173,125 @@ namespace dawn_native {
}
namespace {
- size_t HashBindingInfo(const BindGroupLayoutBase::LayoutBindingInfo& info) {
- size_t hash = Hash(info.mask);
- HashCombine(&hash, info.hasDynamicOffset, info.multisampled);
- for (uint32_t binding : IterateBitSet(info.mask)) {
- HashCombine(&hash, info.visibilities[binding], info.types[binding],
- info.textureComponentTypes[binding], info.textureDimensions[binding]);
- }
+ void HashCombineBindingInfo(size_t* hash, const BindingInfo& info) {
+ HashCombine(hash, info.hasDynamicOffset, info.multisampled, info.visibility, info.type,
+ info.textureComponentType, info.textureDimension,
+ info.storageTextureFormat);
+ }
- return hash;
+ bool operator!=(const BindingInfo& a, const BindingInfo& b) {
+ return a.hasDynamicOffset != b.hasDynamicOffset || //
+ a.multisampled != b.multisampled || //
+ a.visibility != b.visibility || //
+ a.type != b.type || //
+ a.textureComponentType != b.textureComponentType || //
+ a.textureDimension != b.textureDimension || //
+ a.storageTextureFormat != b.storageTextureFormat;
}
- bool operator==(const BindGroupLayoutBase::LayoutBindingInfo& a,
- const BindGroupLayoutBase::LayoutBindingInfo& b) {
- if (a.mask != b.mask || a.hasDynamicOffset != b.hasDynamicOffset ||
- a.multisampled != b.multisampled) {
- return false;
+ bool SortBindingsCompare(const BindGroupLayoutBinding& a, const BindGroupLayoutBinding& b) {
+ if (a.hasDynamicOffset != b.hasDynamicOffset) {
+ // Buffers with dynamic offsets should come before those without.
+ // This makes it easy to iterate over the dynamic buffer bindings
+ // [0, dynamicBufferCount) during validation.
+ return a.hasDynamicOffset > b.hasDynamicOffset;
+ }
+ if (a.type != b.type) {
+ // Buffers have smaller type enums. They should be placed first.
+ return a.type < b.type;
+ }
+ if (a.visibility != b.visibility) {
+ return a.visibility < b.visibility;
+ }
+ if (a.multisampled != b.multisampled) {
+ return a.multisampled < b.multisampled;
+ }
+ if (a.textureDimension != b.textureDimension) {
+ return a.textureDimension < b.textureDimension;
}
+ if (a.textureComponentType != b.textureComponentType) {
+ return a.textureComponentType < b.textureComponentType;
+ }
+ if (a.storageTextureFormat != b.storageTextureFormat) {
+ return a.storageTextureFormat < b.storageTextureFormat;
+ }
+ return false;
+ }
+
+ // This is a utility function to help ASSERT that the BGL-binding comparator places buffers
+ // first.
+ bool CheckBufferBindingsFirst(const BindingInfo* bindings, BindingIndex count) {
+ ASSERT(count <= kMaxBindingsPerGroup);
- for (uint32_t binding : IterateBitSet(a.mask)) {
- if ((a.visibilities[binding] != b.visibilities[binding]) ||
- (a.types[binding] != b.types[binding]) ||
- (a.textureComponentTypes[binding] != b.textureComponentTypes[binding]) ||
- (a.textureDimensions[binding] != b.textureDimensions[binding])) {
- return false;
+ BindingIndex lastBufferIndex = 0;
+ BindingIndex firstNonBufferIndex = std::numeric_limits<BindingIndex>::max();
+ for (BindingIndex i = 0; i < count; ++i) {
+ switch (bindings[i].type) {
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ lastBufferIndex = std::max(i, lastBufferIndex);
+ break;
+ case wgpu::BindingType::SampledTexture:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
+ firstNonBufferIndex = std::min(i, firstNonBufferIndex);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
}
- return true;
+ // If there are no buffers, then |lastBufferIndex| is initialized to 0 and
+ // |firstNonBufferIndex| gets set to 0.
+ return firstNonBufferIndex >= lastBufferIndex;
}
+
} // namespace
// BindGroupLayoutBase
BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
const BindGroupLayoutDescriptor* descriptor)
- : CachedObject(device) {
- for (uint32_t i = 0; i < descriptor->bindingCount; ++i) {
- auto& binding = descriptor->bindings[i];
+ : CachedObject(device), mBindingCount(descriptor->bindingCount) {
+ std::vector<BindGroupLayoutBinding> sortedBindings(
+ descriptor->bindings, descriptor->bindings + descriptor->bindingCount);
+
+ std::sort(sortedBindings.begin(), sortedBindings.end(), SortBindingsCompare);
+
+ for (BindingIndex i = 0; i < mBindingCount; ++i) {
+ const BindGroupLayoutBinding& binding = sortedBindings[i];
+ mBindingInfo[i].type = binding.type;
+ mBindingInfo[i].visibility = binding.visibility;
+ mBindingInfo[i].textureComponentType =
+ Format::TextureComponentTypeToFormatType(binding.textureComponentType);
+ mBindingInfo[i].storageTextureFormat = binding.storageTextureFormat;
- uint32_t index = binding.binding;
- mBindingInfo.visibilities[index] = binding.visibility;
- mBindingInfo.types[index] = binding.type;
- mBindingInfo.textureComponentTypes[index] = binding.textureComponentType;
+ switch (binding.type) {
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ // Buffers must be contiguously packed at the start of the binding info.
+ ASSERT(mBufferCount == i);
+ ++mBufferCount;
+ break;
+ default:
+ break;
+ }
if (binding.textureDimension == wgpu::TextureViewDimension::Undefined) {
- mBindingInfo.textureDimensions[index] = wgpu::TextureViewDimension::e2D;
+ mBindingInfo[i].textureDimension = wgpu::TextureViewDimension::e2D;
} else {
- mBindingInfo.textureDimensions[index] = binding.textureDimension;
+ mBindingInfo[i].textureDimension = binding.textureDimension;
}
+
+ mBindingInfo[i].multisampled = binding.multisampled;
+ mBindingInfo[i].hasDynamicOffset = binding.hasDynamicOffset;
if (binding.hasDynamicOffset) {
- mBindingInfo.hasDynamicOffset.set(index);
switch (binding.type) {
case wgpu::BindingType::UniformBuffer:
++mDynamicUniformBufferCount;
@@ -156,16 +303,17 @@ namespace dawn_native {
case wgpu::BindingType::SampledTexture:
case wgpu::BindingType::Sampler:
case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
UNREACHABLE();
break;
}
}
- mBindingInfo.multisampled.set(index, binding.multisampled);
-
- ASSERT(!mBindingInfo.mask[index]);
- mBindingInfo.mask.set(index);
+ const auto& it = mBindingMap.emplace(BindingNumber(binding.binding), i);
+ ASSERT(it.second);
}
+ ASSERT(CheckBufferBindingsFirst(mBindingInfo.data(), mBindingCount));
}
BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
@@ -184,21 +332,47 @@ namespace dawn_native {
return new BindGroupLayoutBase(device, ObjectBase::kError);
}
- const BindGroupLayoutBase::LayoutBindingInfo& BindGroupLayoutBase::GetBindingInfo() const {
+ const BindGroupLayoutBase::BindingMap& BindGroupLayoutBase::GetBindingMap() const {
+ ASSERT(!IsError());
+ return mBindingMap;
+ }
+
+ BindingIndex BindGroupLayoutBase::GetBindingIndex(BindingNumber bindingNumber) const {
ASSERT(!IsError());
- return mBindingInfo;
+ const auto& it = mBindingMap.find(bindingNumber);
+ ASSERT(it != mBindingMap.end());
+ return it->second;
}
size_t BindGroupLayoutBase::HashFunc::operator()(const BindGroupLayoutBase* bgl) const {
- return HashBindingInfo(bgl->mBindingInfo);
+ size_t hash = 0;
+ // std::map is sorted by key, so two BGLs constructed in different orders
+ // will still hash the same.
+ for (const auto& it : bgl->mBindingMap) {
+ HashCombine(&hash, it.first, it.second);
+ HashCombineBindingInfo(&hash, bgl->mBindingInfo[it.second]);
+ }
+ return hash;
}
bool BindGroupLayoutBase::EqualityFunc::operator()(const BindGroupLayoutBase* a,
const BindGroupLayoutBase* b) const {
- return a->mBindingInfo == b->mBindingInfo;
+ if (a->GetBindingCount() != b->GetBindingCount()) {
+ return false;
+ }
+ for (BindingIndex i = 0; i < a->GetBindingCount(); ++i) {
+ if (a->mBindingInfo[i] != b->mBindingInfo[i]) {
+ return false;
+ }
+ }
+ return a->mBindingMap == b->mBindingMap;
+ }
+
+ BindingIndex BindGroupLayoutBase::GetBindingCount() const {
+ return mBindingCount;
}
- uint32_t BindGroupLayoutBase::GetDynamicBufferCount() const {
+ BindingIndex BindGroupLayoutBase::GetDynamicBufferCount() const {
return mDynamicStorageBufferCount + mDynamicUniformBufferCount;
}
@@ -210,4 +384,23 @@ namespace dawn_native {
return mDynamicStorageBufferCount;
}
+ size_t BindGroupLayoutBase::GetBindingDataSize() const {
+ // | ------ buffer-specific ----------| ------------ object pointers -------------|
+ // | --- offsets + sizes -------------| --------------- Ref<ObjectBase> ----------|
+ size_t objectPointerStart = mBufferCount * sizeof(BufferBindingData);
+ ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
+ return objectPointerStart + mBindingCount * sizeof(Ref<ObjectBase>);
+ }
+
+ BindGroupLayoutBase::BindingDataPointers BindGroupLayoutBase::ComputeBindingDataPointers(
+ void* dataStart) const {
+ BufferBindingData* bufferData = reinterpret_cast<BufferBindingData*>(dataStart);
+ auto bindings = reinterpret_cast<Ref<ObjectBase>*>(bufferData + mBufferCount);
+
+ ASSERT(IsPtrAligned(bufferData, alignof(BufferBindingData)));
+ ASSERT(IsPtrAligned(bindings, alignof(Ref<ObjectBase>)));
+
+ return {bufferData, bindings};
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
index 4c0dd7aae8d..21a79129a0f 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
@@ -16,6 +16,9 @@
#define DAWNNATIVE_BINDGROUPLAYOUT_H_
#include "common/Constants.h"
+#include "common/Math.h"
+#include "common/SlabAllocator.h"
+#include "dawn_native/BindingInfo.h"
#include "dawn_native/CachedObject.h"
#include "dawn_native/Error.h"
#include "dawn_native/Forward.h"
@@ -24,12 +27,24 @@
#include <array>
#include <bitset>
+#include <map>
namespace dawn_native {
MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase*,
const BindGroupLayoutDescriptor* descriptor);
+ MaybeError ValidateBindingTypeWithShaderStageVisibility(
+ wgpu::BindingType bindingType,
+ wgpu::ShaderStage shaderStageVisibility);
+
+ MaybeError ValidateStorageTextureFormat(DeviceBase* device,
+ wgpu::BindingType bindingType,
+ wgpu::TextureFormat storageTextureFormat);
+
+ // Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
+ // These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
+ // into a packed range of |BindingIndex| integers.
class BindGroupLayoutBase : public CachedObject {
public:
BindGroupLayoutBase(DeviceBase* device, const BindGroupLayoutDescriptor* descriptor);
@@ -37,16 +52,16 @@ namespace dawn_native {
static BindGroupLayoutBase* MakeError(DeviceBase* device);
- struct LayoutBindingInfo {
- std::array<wgpu::ShaderStage, kMaxBindingsPerGroup> visibilities;
- std::array<wgpu::BindingType, kMaxBindingsPerGroup> types;
- std::array<wgpu::TextureComponentType, kMaxBindingsPerGroup> textureComponentTypes;
- std::array<wgpu::TextureViewDimension, kMaxBindingsPerGroup> textureDimensions;
- std::bitset<kMaxBindingsPerGroup> hasDynamicOffset;
- std::bitset<kMaxBindingsPerGroup> multisampled;
- std::bitset<kMaxBindingsPerGroup> mask;
- };
- const LayoutBindingInfo& GetBindingInfo() const;
+ // A map from the BindingNumber to its packed BindingIndex.
+ using BindingMap = std::map<BindingNumber, BindingIndex>;
+
+ const BindingInfo& GetBindingInfo(BindingIndex bindingIndex) const {
+ ASSERT(!IsError());
+ ASSERT(bindingIndex < kMaxBindingsPerGroup);
+ return mBindingInfo[bindingIndex];
+ }
+ const BindingMap& GetBindingMap() const;
+ BindingIndex GetBindingIndex(BindingNumber bindingNumber) const;
// Functors necessary for the unordered_set<BGLBase*>-based cache.
struct HashFunc {
@@ -56,16 +71,54 @@ namespace dawn_native {
bool operator()(const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const;
};
- uint32_t GetDynamicBufferCount() const;
+ BindingIndex GetBindingCount() const;
+ // Returns |BindingIndex| because dynamic buffers are packed at the front.
+ BindingIndex GetDynamicBufferCount() const;
uint32_t GetDynamicUniformBufferCount() const;
uint32_t GetDynamicStorageBufferCount() const;
+ struct BufferBindingData {
+ uint64_t offset;
+ uint64_t size;
+ };
+
+ struct BindingDataPointers {
+ BufferBindingData* const bufferData = nullptr;
+ Ref<ObjectBase>* const bindings = nullptr;
+ };
+
+ // Compute the amount of space / alignment required to store bindings for a bind group of
+ // this layout.
+ size_t GetBindingDataSize() const;
+ static constexpr size_t GetBindingDataAlignment() {
+ static_assert(alignof(Ref<ObjectBase>) <= alignof(BufferBindingData), "");
+ return alignof(BufferBindingData);
+ }
+
+ BindingDataPointers ComputeBindingDataPointers(void* dataStart) const;
+
+ protected:
+ template <typename BindGroup>
+ SlabAllocator<BindGroup> MakeFrontendBindGroupAllocator(size_t size) {
+ return SlabAllocator<BindGroup>(
+ size, // bytes
+ Align(sizeof(BindGroup), GetBindingDataAlignment()) + GetBindingDataSize(), // size
+ std::max(alignof(BindGroup), GetBindingDataAlignment()) // alignment
+ );
+ }
+
private:
BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- LayoutBindingInfo mBindingInfo;
+ BindingIndex mBindingCount;
+ BindingIndex mBufferCount = 0; // |BindingIndex| because buffers are packed at the front.
uint32_t mDynamicUniformBufferCount = 0;
uint32_t mDynamicStorageBufferCount = 0;
+
+ std::array<BindingInfo, kMaxBindingsPerGroup> mBindingInfo;
+
+ // Map from BindGroupLayoutEntry.binding to packed indices.
+ BindingMap mBindingMap;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BindingInfo.h b/chromium/third_party/dawn/src/dawn_native/BindingInfo.h
new file mode 100644
index 00000000000..51f37b9b76d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/BindingInfo.h
@@ -0,0 +1,46 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BINDINGINFO_H_
+#define DAWNNATIVE_BINDINGINFO_H_
+
+#include "dawn_native/Format.h"
+#include "dawn_native/dawn_platform.h"
+
+#include <cstdint>
+
+namespace dawn_native {
+
+ // TODO(enga): Can we have strongly typed integers so you can't convert between them
+ // by accident? And also range-assertions (ex. kMaxBindingsPerGroup) in Debug?
+
+ // Binding numbers in the shader and BindGroup/BindGroupLayoutDescriptors
+ using BindingNumber = uint32_t;
+
+ // Binding numbers get mapped to a packed range of indices
+ using BindingIndex = uint32_t;
+
+ struct BindingInfo {
+ wgpu::ShaderStage visibility;
+ wgpu::BindingType type;
+ Format::Type textureComponentType = Format::Type::Float;
+ wgpu::TextureViewDimension textureDimension = wgpu::TextureViewDimension::Undefined;
+ wgpu::TextureFormat storageTextureFormat = wgpu::TextureFormat::Undefined;
+ bool hasDynamicOffset = false;
+ bool multisampled = false;
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_BINDINGINFO_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
index c57d23a129c..6502475b16b 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
@@ -17,6 +17,7 @@
#include "common/Assert.h"
#include "dawn_native/Device.h"
#include "dawn_native/DynamicUploader.h"
+#include "dawn_native/ErrorData.h"
#include "dawn_native/ValidationUtils_autogen.h"
#include <cstdio>
@@ -191,6 +192,8 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Buffer used in a submit while mapped");
case BufferState::Unmapped:
return {};
+ default:
+ UNREACHABLE();
}
}
@@ -201,11 +204,17 @@ namespace dawn_native {
ASSERT(!IsError());
if (mMapReadCallback != nullptr && serial == mMapSerial) {
ASSERT(mMapWriteCallback == nullptr);
+
// Tag the callback as fired before firing it, otherwise it could fire a second time if
// for example buffer.Unmap() is called inside the application-provided callback.
WGPUBufferMapReadCallback callback = mMapReadCallback;
mMapReadCallback = nullptr;
- callback(status, pointer, dataLength, mMapUserdata);
+
+ if (GetDevice()->IsLost()) {
+ callback(WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0, mMapUserdata);
+ } else {
+ callback(status, pointer, dataLength, mMapUserdata);
+ }
}
}
@@ -216,11 +225,17 @@ namespace dawn_native {
ASSERT(!IsError());
if (mMapWriteCallback != nullptr && serial == mMapSerial) {
ASSERT(mMapReadCallback == nullptr);
+
// Tag the callback as fired before firing it, otherwise it could fire a second time if
// for example buffer.Unmap() is called inside the application-provided callback.
WGPUBufferMapWriteCallback callback = mMapWriteCallback;
mMapWriteCallback = nullptr;
- callback(status, pointer, dataLength, mMapUserdata);
+
+ if (GetDevice()->IsLost()) {
+ callback(WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0, mMapUserdata);
+ } else {
+ callback(status, pointer, dataLength, mMapUserdata);
+ }
}
}
@@ -236,8 +251,9 @@ namespace dawn_native {
}
void BufferBase::MapReadAsync(WGPUBufferMapReadCallback callback, void* userdata) {
- if (GetDevice()->ConsumedError(ValidateMap(wgpu::BufferUsage::MapRead))) {
- callback(WGPUBufferMapAsyncStatus_Error, nullptr, 0, userdata);
+ WGPUBufferMapAsyncStatus status;
+ if (GetDevice()->ConsumedError(ValidateMap(wgpu::BufferUsage::MapRead, &status))) {
+ callback(status, nullptr, 0, userdata);
return;
}
ASSERT(!IsError());
@@ -272,8 +288,9 @@ namespace dawn_native {
}
void BufferBase::MapWriteAsync(WGPUBufferMapWriteCallback callback, void* userdata) {
- if (GetDevice()->ConsumedError(ValidateMap(wgpu::BufferUsage::MapWrite))) {
- callback(WGPUBufferMapAsyncStatus_Error, nullptr, 0, userdata);
+ WGPUBufferMapAsyncStatus status;
+ if (GetDevice()->ConsumedError(ValidateMap(wgpu::BufferUsage::MapWrite, &status))) {
+ callback(status, nullptr, 0, userdata);
return;
}
ASSERT(!IsError());
@@ -350,6 +367,7 @@ namespace dawn_native {
}
MaybeError BufferBase::ValidateSetSubData(uint32_t start, uint32_t count) const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
switch (mState) {
@@ -387,7 +405,12 @@ namespace dawn_native {
return {};
}
- MaybeError BufferBase::ValidateMap(wgpu::BufferUsage requiredUsage) const {
+ MaybeError BufferBase::ValidateMap(wgpu::BufferUsage requiredUsage,
+ WGPUBufferMapAsyncStatus* status) const {
+ *status = WGPUBufferMapAsyncStatus_DeviceLost;
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+
+ *status = WGPUBufferMapAsyncStatus_Error;
DAWN_TRY(GetDevice()->ValidateObject(this));
switch (mState) {
@@ -403,10 +426,12 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Buffer needs the correct map usage bit");
}
+ *status = WGPUBufferMapAsyncStatus_Success;
return {};
}
MaybeError BufferBase::ValidateUnmap() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
switch (mState) {
@@ -421,6 +446,8 @@ namespace dawn_native {
return {};
case BufferState::Destroyed:
return DAWN_VALIDATION_ERROR("Buffer is destroyed");
+ default:
+ UNREACHABLE();
}
}
@@ -436,4 +463,8 @@ namespace dawn_native {
mState = BufferState::Destroyed;
}
+ bool BufferBase::IsMapped() const {
+ return mState == BufferState::Mapped;
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.h b/chromium/third_party/dawn/src/dawn_native/Buffer.h
index 054e5550456..387ef5c371a 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.h
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.h
@@ -82,6 +82,8 @@ namespace dawn_native {
void DestroyInternal();
+ bool IsMapped() const;
+
private:
virtual MaybeError MapAtCreationImpl(uint8_t** mappedPointer) = 0;
virtual MaybeError SetSubDataImpl(uint32_t start, uint32_t count, const void* data);
@@ -94,7 +96,8 @@ namespace dawn_native {
MaybeError CopyFromStagingBuffer();
MaybeError ValidateSetSubData(uint32_t start, uint32_t count) const;
- MaybeError ValidateMap(wgpu::BufferUsage requiredUsage) const;
+ MaybeError ValidateMap(wgpu::BufferUsage requiredUsage,
+ WGPUBufferMapAsyncStatus* status) const;
MaybeError ValidateUnmap() const;
MaybeError ValidateDestroy() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
new file mode 100644
index 00000000000..0e9bbf69dd4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
@@ -0,0 +1,439 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DawnJSONGenerator(
+ TARGET "dawn_native_utils"
+ PRINT_NAME "Dawn native utilities"
+ RESULT_VARIABLE "DAWN_NATIVE_UTILS_GEN_SOURCES"
+)
+
+add_library(dawn_native STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawn_native PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn_native/DawnNative.h"
+ "${DAWN_INCLUDE_DIR}/dawn_native/dawn_native_export.h"
+ ${DAWN_NATIVE_UTILS_GEN_SOURCES}
+ "Adapter.cpp"
+ "Adapter.h"
+ "AttachmentState.cpp"
+ "AttachmentState.h"
+ "BackendConnection.cpp"
+ "BackendConnection.h"
+ "BindGroup.cpp"
+ "BindGroup.h"
+ "BindGroupAndStorageBarrierTracker.h"
+ "BindGroupLayout.cpp"
+ "BindGroupLayout.h"
+ "BindGroupTracker.h"
+ "BindingInfo.h"
+ "BuddyAllocator.cpp"
+ "BuddyAllocator.h"
+ "BuddyMemoryAllocator.cpp"
+ "BuddyMemoryAllocator.h"
+ "Buffer.cpp"
+ "Buffer.h"
+ "CachedObject.cpp"
+ "CachedObject.h"
+ "CommandAllocator.cpp"
+ "CommandAllocator.h"
+ "CommandBuffer.cpp"
+ "CommandBuffer.h"
+ "CommandBufferStateTracker.cpp"
+ "CommandBufferStateTracker.h"
+ "CommandEncoder.cpp"
+ "CommandEncoder.h"
+ "CommandValidation.cpp"
+ "CommandValidation.h"
+ "Commands.cpp"
+ "Commands.h"
+ "ComputePassEncoder.cpp"
+ "ComputePassEncoder.h"
+ "ComputePipeline.cpp"
+ "ComputePipeline.h"
+ "Device.cpp"
+ "Device.h"
+ "DynamicUploader.cpp"
+ "DynamicUploader.h"
+ "EncodingContext.cpp"
+ "EncodingContext.h"
+ "Error.cpp"
+ "Error.h"
+ "ErrorData.cpp"
+ "ErrorData.h"
+ "ErrorInjector.cpp"
+ "ErrorInjector.h"
+ "ErrorScope.cpp"
+ "ErrorScope.h"
+ "ErrorScopeTracker.cpp"
+ "ErrorScopeTracker.h"
+ "Extensions.cpp"
+ "Extensions.h"
+ "Fence.cpp"
+ "Fence.h"
+ "FenceSignalTracker.cpp"
+ "FenceSignalTracker.h"
+ "Format.cpp"
+ "Format.h"
+ "Forward.h"
+ "Instance.cpp"
+ "Instance.h"
+ "ObjectBase.cpp"
+ "ObjectBase.h"
+ "PassResourceUsage.h"
+ "PassResourceUsageTracker.cpp"
+ "PassResourceUsageTracker.h"
+ "PerStage.cpp"
+ "PerStage.h"
+ "Pipeline.cpp"
+ "Pipeline.h"
+ "PipelineLayout.cpp"
+ "PipelineLayout.h"
+ "ProgrammablePassEncoder.cpp"
+ "ProgrammablePassEncoder.h"
+ "Queue.cpp"
+ "Queue.h"
+ "RefCounted.cpp"
+ "RefCounted.h"
+ "RenderBundle.cpp"
+ "RenderBundle.h"
+ "RenderBundleEncoder.cpp"
+ "RenderBundleEncoder.h"
+ "RenderEncoderBase.cpp"
+ "RenderEncoderBase.h"
+ "RenderPassEncoder.cpp"
+ "RenderPassEncoder.h"
+ "RenderPipeline.cpp"
+ "RenderPipeline.h"
+ "ResourceHeap.h"
+ "ResourceHeapAllocator.h"
+ "ResourceMemoryAllocation.cpp"
+ "ResourceMemoryAllocation.h"
+ "RingBufferAllocator.cpp"
+ "RingBufferAllocator.h"
+ "Sampler.cpp"
+ "Sampler.h"
+ "ShaderModule.cpp"
+ "ShaderModule.h"
+ "StagingBuffer.cpp"
+ "StagingBuffer.h"
+ "Surface.cpp"
+ "Surface.h"
+ "SwapChain.cpp"
+ "SwapChain.h"
+ "Texture.cpp"
+ "Texture.h"
+ "ToBackend.h"
+ "Toggles.cpp"
+ "Toggles.h"
+ "dawn_platform.h"
+)
+target_link_libraries(dawn_native
+ PUBLIC dawncpp_headers
+ PRIVATE dawn_common
+ dawn_platform
+ dawn_internal_config
+ shaderc_spvc
+ spirv-cross-core
+)
+
+if (DAWN_USE_X11)
+ find_package(X11 REQUIRED)
+ target_link_libraries(dawn_native PRIVATE ${X11_LIBRARIES})
+endif()
+
+if (DAWN_ENABLE_D3D12)
+ target_sources(dawn_native PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn_native/D3D12Backend.h"
+ "d3d12/AdapterD3D12.cpp"
+ "d3d12/AdapterD3D12.h"
+ "d3d12/BackendD3D12.cpp"
+ "d3d12/BackendD3D12.h"
+ "d3d12/BindGroupD3D12.cpp"
+ "d3d12/BindGroupD3D12.h"
+ "d3d12/BindGroupLayoutD3D12.cpp"
+ "d3d12/BindGroupLayoutD3D12.h"
+ "d3d12/BufferD3D12.cpp"
+ "d3d12/BufferD3D12.h"
+ "d3d12/CommandAllocatorManager.cpp"
+ "d3d12/CommandAllocatorManager.h"
+ "d3d12/CommandBufferD3D12.cpp"
+ "d3d12/CommandBufferD3D12.h"
+ "d3d12/CommandRecordingContext.cpp"
+ "d3d12/CommandRecordingContext.h"
+ "d3d12/ComputePipelineD3D12.cpp"
+ "d3d12/ComputePipelineD3D12.h"
+ "d3d12/D3D12Error.cpp"
+ "d3d12/D3D12Error.h"
+ "d3d12/D3D12Info.cpp"
+ "d3d12/D3D12Info.h"
+ "d3d12/DescriptorHeapAllocationD3D12.cpp",
+ "d3d12/DescriptorHeapAllocationD3D12.h",
+ "d3d12/DescriptorHeapAllocator.cpp"
+ "d3d12/DescriptorHeapAllocator.h"
+ "d3d12/DeviceD3D12.cpp"
+ "d3d12/DeviceD3D12.h"
+ "d3d12/Forward.h"
+ "d3d12/HeapAllocatorD3D12.cpp"
+ "d3d12/HeapAllocatorD3D12.h"
+ "d3d12/HeapD3D12.cpp"
+ "d3d12/HeapD3D12.h"
+ "d3d12/NativeSwapChainImplD3D12.cpp"
+ "d3d12/NativeSwapChainImplD3D12.h"
+ "d3d12/PipelineLayoutD3D12.cpp"
+ "d3d12/PipelineLayoutD3D12.h"
+ "d3d12/PlatformFunctions.cpp"
+ "d3d12/PlatformFunctions.h"
+ "d3d12/QueueD3D12.cpp"
+ "d3d12/QueueD3D12.h"
+ "d3d12/RenderPassBuilderD3D12.cpp"
+ "d3d12/RenderPassBuilderD3D12.h"
+ "d3d12/RenderPipelineD3D12.cpp"
+ "d3d12/RenderPipelineD3D12.h"
+ "d3d12/ResidencyManagerD3D12.cpp"
+ "d3d12/ResidencyManagerD3D12.h"
+ "d3d12/ResourceAllocatorManagerD3D12.cpp"
+ "d3d12/ResourceAllocatorManagerD3D12.h"
+ "d3d12/ResourceHeapAllocationD3D12.cpp"
+ "d3d12/ResourceHeapAllocationD3D12.h"
+ "d3d12/SamplerD3D12.cpp"
+ "d3d12/SamplerD3D12.h"
+ "d3d12/ShaderModuleD3D12.cpp"
+ "d3d12/ShaderModuleD3D12.h"
+ "d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp",
+ "d3d12/ShaderVisibleDescriptorAllocatorD3D12.h",
+ "d3d12/StagingBufferD3D12.cpp"
+ "d3d12/StagingBufferD3D12.h"
+ "d3d12/SwapChainD3D12.cpp"
+ "d3d12/SwapChainD3D12.h"
+ "d3d12/TextureCopySplitter.cpp"
+ "d3d12/TextureCopySplitter.h"
+ "d3d12/TextureD3D12.cpp"
+ "d3d12/TextureD3D12.h"
+ "d3d12/UtilsD3D12.cpp"
+ "d3d12/UtilsD3D12.h"
+ "d3d12/d3d12_platform.h"
+ )
+ target_link_libraries(dawn_native PRIVATE dxguid.lib)
+endif()
+
+if (DAWN_ENABLE_METAL)
+ target_sources(dawn_native PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn_native/MetalBackend.h"
+ "Surface_metal.mm"
+ "metal/BackendMTL.h"
+ "metal/BackendMTL.mm"
+ "metal/BindGroupLayoutMTL.h"
+ "metal/BindGroupLayoutMTL.mm"
+ "metal/BindGroupMTL.h"
+ "metal/BindGroupMTL.mm"
+ "metal/BufferMTL.h"
+ "metal/BufferMTL.mm"
+ "metal/CommandBufferMTL.h"
+ "metal/CommandBufferMTL.mm"
+ "metal/CommandRecordingContext.h"
+ "metal/CommandRecordingContext.mm"
+ "metal/ComputePipelineMTL.h"
+ "metal/ComputePipelineMTL.mm"
+ "metal/DeviceMTL.h"
+ "metal/DeviceMTL.mm"
+ "metal/Forward.h"
+ "metal/PipelineLayoutMTL.h"
+ "metal/PipelineLayoutMTL.mm"
+ "metal/QueueMTL.h"
+ "metal/QueueMTL.mm"
+ "metal/RenderPipelineMTL.h"
+ "metal/RenderPipelineMTL.mm"
+ "metal/SamplerMTL.h"
+ "metal/SamplerMTL.mm"
+ "metal/ShaderModuleMTL.h"
+ "metal/ShaderModuleMTL.mm"
+ "metal/StagingBufferMTL.h"
+ "metal/StagingBufferMTL.mm"
+ "metal/SwapChainMTL.h"
+ "metal/SwapChainMTL.mm"
+ "metal/TextureMTL.h"
+ "metal/TextureMTL.mm"
+ "metal/UtilsMetal.h"
+ "metal/UtilsMetal.mm"
+ )
+ target_link_libraries(dawn_native PRIVATE
+ "-framework Cocoa"
+ "-framework IOKit"
+ "-framework IOSurface"
+ "-framework QuartzCore"
+ )
+endif()
+
+if (DAWN_ENABLE_NULL)
+ target_sources(dawn_native PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn_native/NullBackend.h"
+ "null/DeviceNull.cpp"
+ "null/DeviceNull.h"
+ )
+endif()
+
+if (DAWN_ENABLE_OPENGL)
+ DawnGenerator(
+ SCRIPT "${Dawn_SOURCE_DIR}/generator/opengl_loader_generator.py"
+ PRINT_NAME "OpenGL function loader"
+ ARGS "--gl-xml"
+ "${Dawn_SOURCE_DIR}/third_party/khronos/gl.xml"
+ "--supported-extensions"
+ "${Dawn_SOURCE_DIR}/src/dawn_native/opengl/supported_extensions.json"
+ RESULT_VARIABLE "DAWN_NATIVE_OPENGL_AUTOGEN_SOURCES"
+ )
+
+ target_sources(dawn_native PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn_native/OpenGLBackend.h"
+ ${DAWN_NATIVE_OPENGL_AUTOGEN_SOURCES}
+ "opengl/BackendGL.cpp"
+ "opengl/BackendGL.h"
+ "opengl/BindGroupGL.cpp"
+ "opengl/BindGroupGL.h"
+ "opengl/BindGroupLayoutGL.cpp"
+ "opengl/BindGroupLayoutGL.h"
+ "opengl/BufferGL.cpp"
+ "opengl/BufferGL.h"
+ "opengl/CommandBufferGL.cpp"
+ "opengl/CommandBufferGL.h"
+ "opengl/ComputePipelineGL.cpp"
+ "opengl/ComputePipelineGL.h"
+ "opengl/DeviceGL.cpp"
+ "opengl/DeviceGL.h"
+ "opengl/Forward.h"
+ "opengl/GLFormat.cpp"
+ "opengl/GLFormat.h"
+ "opengl/NativeSwapChainImplGL.cpp"
+ "opengl/NativeSwapChainImplGL.h"
+ "opengl/OpenGLFunctions.cpp"
+ "opengl/OpenGLFunctions.h"
+ "opengl/PersistentPipelineStateGL.cpp"
+ "opengl/PersistentPipelineStateGL.h"
+ "opengl/PipelineGL.cpp"
+ "opengl/PipelineGL.h"
+ "opengl/PipelineLayoutGL.cpp"
+ "opengl/PipelineLayoutGL.h"
+ "opengl/QueueGL.cpp"
+ "opengl/QueueGL.h"
+ "opengl/RenderPipelineGL.cpp"
+ "opengl/RenderPipelineGL.h"
+ "opengl/SamplerGL.cpp"
+ "opengl/SamplerGL.h"
+ "opengl/ShaderModuleGL.cpp"
+ "opengl/ShaderModuleGL.h"
+ "opengl/SwapChainGL.cpp"
+ "opengl/SwapChainGL.h"
+ "opengl/TextureGL.cpp"
+ "opengl/TextureGL.h"
+ "opengl/UtilsGL.cpp"
+ "opengl/UtilsGL.h"
+ "opengl/opengl_platform.h"
+ )
+
+ target_link_libraries(dawn_native PRIVATE dawn_khronos_platform)
+endif()
+
+if (DAWN_ENABLE_VULKAN)
+ target_sources(dawn_native PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn_native/VulkanBackend.h"
+ "vulkan/AdapterVk.cpp"
+ "vulkan/AdapterVk.h"
+ "vulkan/BackendVk.cpp"
+ "vulkan/BackendVk.h"
+ "vulkan/BindGroupLayoutVk.cpp"
+ "vulkan/BindGroupLayoutVk.h"
+ "vulkan/BindGroupVk.cpp"
+ "vulkan/BindGroupVk.h"
+ "vulkan/BufferVk.cpp"
+ "vulkan/BufferVk.h"
+ "vulkan/CommandBufferVk.cpp"
+ "vulkan/CommandBufferVk.h"
+ "vulkan/CommandRecordingContext.h"
+ "vulkan/ComputePipelineVk.cpp"
+ "vulkan/ComputePipelineVk.h"
+ "vulkan/DescriptorSetService.cpp"
+ "vulkan/DescriptorSetService.h"
+ "vulkan/DeviceVk.cpp"
+ "vulkan/DeviceVk.h"
+ "vulkan/ExternalHandle.h"
+ "vulkan/FencedDeleter.cpp"
+ "vulkan/FencedDeleter.h"
+ "vulkan/Forward.h"
+ "vulkan/NativeSwapChainImplVk.cpp"
+ "vulkan/NativeSwapChainImplVk.h"
+ "vulkan/PipelineLayoutVk.cpp"
+ "vulkan/PipelineLayoutVk.h"
+ "vulkan/QueueVk.cpp"
+ "vulkan/QueueVk.h"
+ "vulkan/RenderPassCache.cpp"
+ "vulkan/RenderPassCache.h"
+ "vulkan/RenderPipelineVk.cpp"
+ "vulkan/RenderPipelineVk.h"
+ "vulkan/ResourceHeapVk.cpp"
+ "vulkan/ResourceHeapVk.h"
+ "vulkan/ResourceMemoryAllocatorVk.cpp"
+ "vulkan/ResourceMemoryAllocatorVk.h"
+ "vulkan/SamplerVk.cpp"
+ "vulkan/SamplerVk.h"
+ "vulkan/ShaderModuleVk.cpp"
+ "vulkan/ShaderModuleVk.h"
+ "vulkan/StagingBufferVk.cpp"
+ "vulkan/StagingBufferVk.h"
+ "vulkan/SwapChainVk.cpp"
+ "vulkan/SwapChainVk.h"
+ "vulkan/TextureVk.cpp"
+ "vulkan/TextureVk.h"
+ "vulkan/UtilsVulkan.cpp"
+ "vulkan/UtilsVulkan.h"
+ "vulkan/VulkanError.cpp"
+ "vulkan/VulkanError.h"
+ "vulkan/VulkanFunctions.cpp"
+ "vulkan/VulkanFunctions.h"
+ "vulkan/VulkanInfo.cpp"
+ "vulkan/VulkanInfo.h"
+ "vulkan/external_memory/MemoryService.h"
+ "vulkan/external_semaphore/SemaphoreService.h"
+ )
+
+ target_link_libraries(dawn_native PUBLIC dawn_vulkan_headers)
+
+ if (UNIX AND NOT APPLE)
+ target_sources(dawn_native PRIVATE
+ "vulkan/external_memory/MemoryServiceOpaqueFD.cpp"
+ "vulkan/external_semaphore/SemaphoreServiceOpaqueFD.cpp"
+ )
+ else()
+ target_sources(dawn_native PRIVATE
+ "vulkan/external_memory/MemoryServiceNull.cpp"
+ "vulkan/external_semaphore/SemaphoreServiceNull.cpp"
+ )
+ endif()
+endif()
+
+# TODO how to do the component build in CMake?
+target_sources(dawn_native PRIVATE "DawnNative.cpp")
+if (DAWN_ENABLE_D3D12)
+ target_sources(dawn_native PRIVATE "d3d12/D3D12Backend.cpp")
+endif()
+if (DAWN_ENABLE_METAL)
+ target_sources(dawn_native PRIVATE "metal/MetalBackend.mm")
+endif()
+if (DAWN_ENABLE_NULL)
+ target_sources(dawn_native PRIVATE "null/NullBackend.cpp")
+endif()
+if (DAWN_ENABLE_OPENGL)
+ target_sources(dawn_native PRIVATE "opengl/OpenGLBackend.cpp")
+endif()
+if (DAWN_ENABLE_VULKAN)
+ target_sources(dawn_native PRIVATE "vulkan/VulkanBackend.cpp")
+endif()
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp
index 990c1c59909..553f8896577 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp
@@ -23,12 +23,9 @@
namespace dawn_native {
- constexpr uint32_t EndOfBlock = UINT_MAX; // std::numeric_limits<uint32_t>::max();
- constexpr uint32_t AdditionalData = UINT_MAX - 1; // std::numeric_limits<uint32_t>::max() - 1;
-
// TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator
- CommandIterator::CommandIterator() : mEndOfBlock(EndOfBlock) {
+ CommandIterator::CommandIterator() {
Reset();
}
@@ -42,7 +39,7 @@ namespace dawn_native {
}
}
- CommandIterator::CommandIterator(CommandIterator&& other) : mEndOfBlock(EndOfBlock) {
+ CommandIterator::CommandIterator(CommandIterator&& other) {
if (!other.IsEmpty()) {
mBlocks = std::move(other.mBlocks);
other.Reset();
@@ -64,7 +61,7 @@ namespace dawn_native {
}
CommandIterator::CommandIterator(CommandAllocator&& allocator)
- : mBlocks(allocator.AcquireBlocks()), mEndOfBlock(EndOfBlock) {
+ : mBlocks(allocator.AcquireBlocks()) {
Reset();
}
@@ -74,6 +71,17 @@ namespace dawn_native {
return *this;
}
+ bool CommandIterator::NextCommandIdInNewBlock(uint32_t* commandId) {
+ mCurrentBlock++;
+ if (mCurrentBlock >= mBlocks.size()) {
+ Reset();
+ *commandId = detail::kEndOfBlock;
+ return false;
+ }
+ mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
+ return NextCommandId(commandId);
+ }
+
void CommandIterator::Reset() {
mCurrentBlock = 0;
@@ -97,47 +105,6 @@ namespace dawn_native {
return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
}
- bool CommandIterator::NextCommandId(uint32_t* commandId) {
- uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
- ASSERT(idPtr + sizeof(uint32_t) <=
- mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
-
- uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
-
- if (id == EndOfBlock) {
- mCurrentBlock++;
- if (mCurrentBlock >= mBlocks.size()) {
- Reset();
- *commandId = EndOfBlock;
- return false;
- }
- mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
- return NextCommandId(commandId);
- }
-
- mCurrentPtr = idPtr + sizeof(uint32_t);
- *commandId = id;
- return true;
- }
-
- void* CommandIterator::NextCommand(size_t commandSize, size_t commandAlignment) {
- uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
- ASSERT(commandPtr + sizeof(commandSize) <=
- mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
-
- mCurrentPtr = commandPtr + commandSize;
- return commandPtr;
- }
-
- void* CommandIterator::NextData(size_t dataSize, size_t dataAlignment) {
- uint32_t id;
- bool hasId = NextCommandId(&id);
- ASSERT(hasId);
- ASSERT(id == AdditionalData);
-
- return NextCommand(dataSize, dataAlignment);
- }
-
// Potential TODO(cwallez@chromium.org):
// - Host the size and pointer to next block in the block itself to avoid having an allocation
// in the vector
@@ -161,60 +128,23 @@ namespace dawn_native {
ASSERT(mCurrentPtr != nullptr && mEndPtr != nullptr);
ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
- *reinterpret_cast<uint32_t*>(mCurrentPtr) = EndOfBlock;
+ *reinterpret_cast<uint32_t*>(mCurrentPtr) = detail::kEndOfBlock;
mCurrentPtr = nullptr;
mEndPtr = nullptr;
return std::move(mBlocks);
}
- uint8_t* CommandAllocator::Allocate(uint32_t commandId,
- size_t commandSize,
- size_t commandAlignment) {
- ASSERT(mCurrentPtr != nullptr);
- ASSERT(mEndPtr != nullptr);
- ASSERT(commandId != EndOfBlock);
-
- // It should always be possible to allocate one id, for EndOfBlock tagging,
- ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
- ASSERT(mEndPtr >= mCurrentPtr);
- ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));
-
- // The memory after the ID will contain the following:
- // - the current ID
- // - padding to align the command, maximum kMaxSupportedAlignment
- // - the command of size commandSize
- // - padding to align the next ID, maximum alignof(uint32_t)
- // - the next ID of size sizeof(uint32_t)
- //
- // To avoid checking for overflows at every step of the computations we compute an upper
- // bound of the space that will be needed in addition to the command data.
- static constexpr size_t kWorstCaseAdditionalSize =
- sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
-
- // This can't overflow because by construction mCurrentPtr always has space for the next ID.
- size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);
-
- // The good case were we have enough space for the command data and upper bound of the
- // extra required space.
- if ((remainingSize >= kWorstCaseAdditionalSize) &&
- (remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
- uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
- *idAlloc = commandId;
-
- uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
- mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
-
- return commandAlloc;
- }
-
- // When there is not enough space, we signal the EndOfBlock, so that the iterator knows to
- // move to the next one. EndOfBlock on the last block means the end of the commands.
+ uint8_t* CommandAllocator::AllocateInNewBlock(uint32_t commandId,
+ size_t commandSize,
+ size_t commandAlignment) {
+ // When there is not enough space, we signal the kEndOfBlock, so that the iterator knows
+ // to move to the next one. kEndOfBlock on the last block means the end of the commands.
uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
- *idAlloc = EndOfBlock;
+ *idAlloc = detail::kEndOfBlock;
// We'll request a block that can contain at least the command ID, the command and an
- // additional ID to contain the EndOfBlock tag.
+ // additional ID to contain the kEndOfBlock tag.
size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize;
// The computation of the request could overflow.
@@ -228,10 +158,6 @@ namespace dawn_native {
return Allocate(commandId, commandSize, commandAlignment);
}
- uint8_t* CommandAllocator::AllocateData(size_t commandSize, size_t commandAlignment) {
- return Allocate(AdditionalData, commandSize, commandAlignment);
- }
-
bool CommandAllocator::GetNewBlock(size_t minimumSize) {
// Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
mLastAllocationSize =
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h b/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h
index 504ba7a6a32..82de05c1a45 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h
@@ -15,6 +15,9 @@
#ifndef DAWNNATIVE_COMMAND_ALLOCATOR_H_
#define DAWNNATIVE_COMMAND_ALLOCATOR_H_
+#include "common/Assert.h"
+#include "common/Math.h"
+
#include <cstddef>
#include <cstdint>
#include <vector>
@@ -56,6 +59,11 @@ namespace dawn_native {
};
using CommandBlocks = std::vector<BlockDef>;
+ namespace detail {
+ constexpr uint32_t kEndOfBlock = std::numeric_limits<uint32_t>::max();
+ constexpr uint32_t kAdditionalData = std::numeric_limits<uint32_t>::max() - 1;
+ } // namespace detail
+
class CommandAllocator;
// TODO(cwallez@chromium.org): prevent copy for both iterator and allocator
@@ -91,15 +99,46 @@ namespace dawn_native {
private:
bool IsEmpty() const;
- bool NextCommandId(uint32_t* commandId);
- void* NextCommand(size_t commandSize, size_t commandAlignment);
- void* NextData(size_t dataSize, size_t dataAlignment);
+ DAWN_FORCE_INLINE bool NextCommandId(uint32_t* commandId) {
+ uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
+ ASSERT(idPtr + sizeof(uint32_t) <=
+ mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+
+ uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
+
+ if (id != detail::kEndOfBlock) {
+ mCurrentPtr = idPtr + sizeof(uint32_t);
+ *commandId = id;
+ return true;
+ }
+ return NextCommandIdInNewBlock(commandId);
+ }
+
+ bool NextCommandIdInNewBlock(uint32_t* commandId);
+
+ DAWN_FORCE_INLINE void* NextCommand(size_t commandSize, size_t commandAlignment) {
+ uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
+ ASSERT(commandPtr + sizeof(commandSize) <=
+ mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+
+ mCurrentPtr = commandPtr + commandSize;
+ return commandPtr;
+ }
+
+ DAWN_FORCE_INLINE void* NextData(size_t dataSize, size_t dataAlignment) {
+ uint32_t id;
+ bool hasId = NextCommandId(&id);
+ ASSERT(hasId);
+ ASSERT(id == detail::kAdditionalData);
+
+ return NextCommand(dataSize, dataAlignment);
+ }
CommandBlocks mBlocks;
uint8_t* mCurrentPtr = nullptr;
size_t mCurrentBlock = 0;
// Used to avoid a special case for empty iterators.
- uint32_t mEndOfBlock;
+ uint32_t mEndOfBlock = detail::kEndOfBlock;
bool mDataWasDestroyed = false;
};
@@ -140,18 +179,67 @@ namespace dawn_native {
// using the CommandAllocator passes the static_asserts.
static constexpr size_t kMaxSupportedAlignment = 8;
+ // To avoid checking for overflows at every step of the computations we compute an upper
+ // bound of the space that will be needed in addition to the command data.
+ static constexpr size_t kWorstCaseAdditionalSize =
+ sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
+
friend CommandIterator;
CommandBlocks&& AcquireBlocks();
- uint8_t* Allocate(uint32_t commandId, size_t commandSize, size_t commandAlignment);
- uint8_t* AllocateData(size_t dataSize, size_t dataAlignment);
+ DAWN_FORCE_INLINE uint8_t* Allocate(uint32_t commandId,
+ size_t commandSize,
+ size_t commandAlignment) {
+ ASSERT(mCurrentPtr != nullptr);
+ ASSERT(mEndPtr != nullptr);
+ ASSERT(commandId != detail::kEndOfBlock);
+
+ // It should always be possible to allocate one id, for kEndOfBlock tagging,
+ ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
+ ASSERT(mEndPtr >= mCurrentPtr);
+ ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));
+
+ // The memory after the ID will contain the following:
+ // - the current ID
+ // - padding to align the command, maximum kMaxSupportedAlignment
+ // - the command of size commandSize
+ // - padding to align the next ID, maximum alignof(uint32_t)
+ // - the next ID of size sizeof(uint32_t)
+
+ // This can't overflow because by construction mCurrentPtr always has space for the next
+ // ID.
+ size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);
+
+ // The good case were we have enough space for the command data and upper bound of the
+ // extra required space.
+ if ((remainingSize >= kWorstCaseAdditionalSize) &&
+ (remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
+ uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+ *idAlloc = commandId;
+
+ uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
+ mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
+
+ return commandAlloc;
+ }
+ return AllocateInNewBlock(commandId, commandSize, commandAlignment);
+ }
+
+ uint8_t* AllocateInNewBlock(uint32_t commandId,
+ size_t commandSize,
+ size_t commandAlignment);
+
+ DAWN_FORCE_INLINE uint8_t* AllocateData(size_t commandSize, size_t commandAlignment) {
+ return Allocate(detail::kAdditionalData, commandSize, commandAlignment);
+ }
+
bool GetNewBlock(size_t minimumSize);
CommandBlocks mBlocks;
size_t mLastAllocationSize = 2048;
// Pointers to the current range of allocation in the block. Guaranteed to allow for at
- // least one uint32_t if not nullptr, so that the special EndOfBlock command id can always
+ // least one uint32_t if not nullptr, so that the special kEndOfBlock command id can always
// be written. Nullptr iff the blocks were moved out.
uint8_t* mCurrentPtr = nullptr;
uint8_t* mEndPtr = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
index c042d50db33..e02cff1adcf 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
@@ -14,7 +14,10 @@
#include "dawn_native/CommandBuffer.h"
+#include "common/BitSetIterator.h"
#include "dawn_native/CommandEncoder.h"
+#include "dawn_native/Commands.h"
+#include "dawn_native/Format.h"
#include "dawn_native/Texture.h"
namespace dawn_native {
@@ -47,4 +50,92 @@ namespace dawn_native {
}
return false;
}
+
+ void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass) {
+ for (uint32_t i : IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ auto& attachmentInfo = renderPass->colorAttachments[i];
+ TextureViewBase* view = attachmentInfo.view.Get();
+ bool hasResolveTarget = attachmentInfo.resolveTarget.Get() != nullptr;
+
+ ASSERT(view->GetLayerCount() == 1);
+ ASSERT(view->GetLevelCount() == 1);
+
+ // If the loadOp is Load, but the subresource is not initialized, use Clear instead.
+ if (attachmentInfo.loadOp == wgpu::LoadOp::Load &&
+ !view->GetTexture()->IsSubresourceContentInitialized(
+ view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1)) {
+ attachmentInfo.loadOp = wgpu::LoadOp::Clear;
+ attachmentInfo.clearColor = {0.f, 0.f, 0.f, 0.f};
+ }
+
+ if (hasResolveTarget) {
+ // We need to set the resolve target to initialized so that it does not get
+ // cleared later in the pipeline. The texture will be resolved from the
+ // source color attachment, which will be correctly initialized.
+ TextureViewBase* resolveView = attachmentInfo.resolveTarget.Get();
+ resolveView->GetTexture()->SetIsSubresourceContentInitialized(
+ true, resolveView->GetBaseMipLevel(), resolveView->GetLevelCount(),
+ resolveView->GetBaseArrayLayer(), resolveView->GetLayerCount());
+ }
+
+ switch (attachmentInfo.storeOp) {
+ case wgpu::StoreOp::Store:
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ true, view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
+ break;
+
+ case wgpu::StoreOp::Clear:
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ false, view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+ auto& attachmentInfo = renderPass->depthStencilAttachment;
+ TextureViewBase* view = attachmentInfo.view.Get();
+
+ // If the depth stencil texture has not been initialized, we want to use loadop
+ // clear to init the contents to 0's
+ if (!view->GetTexture()->IsSubresourceContentInitialized(
+ view->GetBaseMipLevel(), view->GetLevelCount(), view->GetBaseArrayLayer(),
+ view->GetLayerCount())) {
+ if (view->GetTexture()->GetFormat().HasDepth() &&
+ attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
+ attachmentInfo.clearDepth = 0.0f;
+ attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+ }
+ if (view->GetTexture()->GetFormat().HasStencil() &&
+ attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
+ attachmentInfo.clearStencil = 0u;
+ attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+ }
+ }
+
+ // If these have different store ops, make them both Store because we can't track
+ // initialized state separately yet. TODO(crbug.com/dawn/145)
+ if (attachmentInfo.depthStoreOp != attachmentInfo.stencilStoreOp) {
+ attachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+ attachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+ }
+
+ if (attachmentInfo.depthStoreOp == wgpu::StoreOp::Store &&
+ attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store) {
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ true, view->GetBaseMipLevel(), view->GetLevelCount(), view->GetBaseArrayLayer(),
+ view->GetLayerCount());
+ } else {
+ ASSERT(attachmentInfo.depthStoreOp == wgpu::StoreOp::Clear &&
+ attachmentInfo.stencilStoreOp == wgpu::StoreOp::Clear);
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ false, view->GetBaseMipLevel(), view->GetLevelCount(),
+ view->GetBaseArrayLayer(), view->GetLayerCount());
+ }
+ }
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h
index 65650bd4979..c1d2597ff84 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h
@@ -23,6 +23,8 @@
namespace dawn_native {
+ struct BeginRenderPassCmd;
+
class CommandBufferBase : public ObjectBase {
public:
CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
@@ -39,6 +41,8 @@ namespace dawn_native {
const Extent3D copySize,
const uint32_t mipLevel);
+ void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass);
+
} // namespace dawn_native
#endif // DAWNNATIVE_COMMANDBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
index e0de3a215a7..23710cd9d54 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
@@ -26,9 +26,11 @@
#include "dawn_native/ErrorData.h"
#include "dawn_native/RenderPassEncoder.h"
#include "dawn_native/RenderPipeline.h"
+#include "dawn_native/ValidationUtils_autogen.h"
#include "dawn_platform/DawnPlatform.h"
#include "dawn_platform/tracing/TraceEvent.h"
+#include <cmath>
#include <map>
namespace dawn_native {
@@ -376,6 +378,18 @@ namespace dawn_native {
"renderable");
}
+ DAWN_TRY(ValidateLoadOp(colorAttachment.loadOp));
+ DAWN_TRY(ValidateStoreOp(colorAttachment.storeOp));
+
+ if (colorAttachment.loadOp == wgpu::LoadOp::Clear) {
+ if (std::isnan(colorAttachment.clearColor.r) ||
+ std::isnan(colorAttachment.clearColor.g) ||
+ std::isnan(colorAttachment.clearColor.b) ||
+ std::isnan(colorAttachment.clearColor.a)) {
+ return DAWN_VALIDATION_ERROR("Color clear value cannot contain NaN");
+ }
+ }
+
DAWN_TRY(ValidateOrSetColorAttachmentSampleCount(attachment, sampleCount));
DAWN_TRY(ValidateResolveTarget(device, colorAttachment));
@@ -404,6 +418,16 @@ namespace dawn_native {
"depth stencil format");
}
+ DAWN_TRY(ValidateLoadOp(depthStencilAttachment->depthLoadOp));
+ DAWN_TRY(ValidateLoadOp(depthStencilAttachment->stencilLoadOp));
+ DAWN_TRY(ValidateStoreOp(depthStencilAttachment->depthStoreOp));
+ DAWN_TRY(ValidateStoreOp(depthStencilAttachment->stencilStoreOp));
+
+ if (depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
+ std::isnan(depthStencilAttachment->clearDepth)) {
+ return DAWN_VALIDATION_ERROR("Depth clear value cannot be NaN");
+ }
+
// This validates that the depth storeOp and stencil storeOps are the same
if (depthStencilAttachment->depthStoreOp != depthStencilAttachment->stencilStoreOp) {
return DAWN_VALIDATION_ERROR(
@@ -740,6 +764,7 @@ namespace dawn_native {
// state of the encoding context. The internal state is set to finished, and subsequent
// calls to encode commands will generate errors.
if (device->ConsumedError(mEncodingContext.Finish()) ||
+ device->ConsumedError(device->ValidateIsAlive()) ||
(device->IsValidationEnabled() &&
device->ConsumedError(ValidateFinish(mEncodingContext.GetIterator(),
mEncodingContext.GetPassUsages())))) {
@@ -768,12 +793,14 @@ namespace dawn_native {
case Command::BeginComputePass: {
commands->NextCommand<BeginComputePassCmd>();
DAWN_TRY(ValidateComputePass(commands));
- } break;
+ break;
+ }
case Command::BeginRenderPass: {
const BeginRenderPassCmd* cmd = commands->NextCommand<BeginRenderPassCmd>();
DAWN_TRY(ValidateRenderPass(commands, cmd));
- } break;
+ break;
+ }
case Command::CopyBufferToBuffer: {
const CopyBufferToBufferCmd* copy =
@@ -788,7 +815,8 @@ namespace dawn_native {
DAWN_TRY(ValidateCanUseAs(copy->source.Get(), wgpu::BufferUsage::CopySrc));
DAWN_TRY(ValidateCanUseAs(copy->destination.Get(), wgpu::BufferUsage::CopyDst));
- } break;
+ break;
+ }
case Command::CopyBufferToTexture: {
const CopyBufferToTextureCmd* copy =
@@ -821,7 +849,8 @@ namespace dawn_native {
ValidateCanUseAs(copy->source.buffer.Get(), wgpu::BufferUsage::CopySrc));
DAWN_TRY(ValidateCanUseAs(copy->destination.texture.Get(),
wgpu::TextureUsage::CopyDst));
- } break;
+ break;
+ }
case Command::CopyTextureToBuffer: {
const CopyTextureToBufferCmd* copy =
@@ -854,7 +883,8 @@ namespace dawn_native {
ValidateCanUseAs(copy->source.texture.Get(), wgpu::TextureUsage::CopySrc));
DAWN_TRY(ValidateCanUseAs(copy->destination.buffer.Get(),
wgpu::BufferUsage::CopyDst));
- } break;
+ break;
+ }
case Command::CopyTextureToTexture: {
const CopyTextureToTextureCmd* copy =
@@ -879,24 +909,28 @@ namespace dawn_native {
ValidateCanUseAs(copy->source.texture.Get(), wgpu::TextureUsage::CopySrc));
DAWN_TRY(ValidateCanUseAs(copy->destination.texture.Get(),
wgpu::TextureUsage::CopyDst));
- } break;
+ break;
+ }
case Command::InsertDebugMarker: {
const InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
commands->NextData<char>(cmd->length + 1);
- } break;
+ break;
+ }
case Command::PopDebugGroup: {
commands->NextCommand<PopDebugGroupCmd>();
DAWN_TRY(ValidateCanPopDebugGroup(debugGroupStackSize));
debugGroupStackSize--;
- } break;
+ break;
+ }
case Command::PushDebugGroup: {
const PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
commands->NextData<char>(cmd->length + 1);
debugGroupStackSize++;
- } break;
+ break;
+ }
default:
return DAWN_VALIDATION_ERROR("Command disallowed outside of a pass");
}
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
index dc7a3ccec54..e56d0e34fa9 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
@@ -37,39 +37,46 @@ namespace dawn_native {
case Command::Draw: {
commands->NextCommand<DrawCmd>();
DAWN_TRY(commandBufferState->ValidateCanDraw());
- } break;
+ break;
+ }
case Command::DrawIndexed: {
commands->NextCommand<DrawIndexedCmd>();
DAWN_TRY(commandBufferState->ValidateCanDrawIndexed());
- } break;
+ break;
+ }
case Command::DrawIndirect: {
commands->NextCommand<DrawIndirectCmd>();
DAWN_TRY(commandBufferState->ValidateCanDraw());
- } break;
+ break;
+ }
case Command::DrawIndexedIndirect: {
commands->NextCommand<DrawIndexedIndirectCmd>();
DAWN_TRY(commandBufferState->ValidateCanDrawIndexed());
- } break;
+ break;
+ }
case Command::InsertDebugMarker: {
InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
commands->NextData<char>(cmd->length + 1);
- } break;
+ break;
+ }
case Command::PopDebugGroup: {
commands->NextCommand<PopDebugGroupCmd>();
DAWN_TRY(ValidateCanPopDebugGroup(*debugGroupStackSize));
*debugGroupStackSize -= 1;
- } break;
+ break;
+ }
case Command::PushDebugGroup: {
PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
commands->NextData<char>(cmd->length + 1);
*debugGroupStackSize += 1;
- } break;
+ break;
+ }
case Command::SetRenderPipeline: {
SetRenderPipelineCmd* cmd = commands->NextCommand<SetRenderPipelineCmd>();
@@ -79,7 +86,8 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Pipeline attachment state is not compatible");
}
commandBufferState->SetRenderPipeline(pipeline);
- } break;
+ break;
+ }
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
@@ -88,17 +96,20 @@ namespace dawn_native {
}
commandBufferState->SetBindGroup(cmd->index, cmd->group.Get());
- } break;
+ break;
+ }
case Command::SetIndexBuffer: {
commands->NextCommand<SetIndexBufferCmd>();
commandBufferState->SetIndexBuffer();
- } break;
+ break;
+ }
case Command::SetVertexBuffer: {
SetVertexBufferCmd* cmd = commands->NextCommand<SetVertexBufferCmd>();
commandBufferState->SetVertexBuffer(cmd->slot);
- } break;
+ break;
+ }
default:
return DAWN_VALIDATION_ERROR(disallowedMessage);
@@ -150,7 +161,7 @@ namespace dawn_native {
commands->NextCommand<EndRenderPassCmd>();
DAWN_TRY(ValidateFinalDebugGroupStackSize(debugGroupStackSize));
return {};
- } break;
+ }
case Command::ExecuteBundles: {
ExecuteBundlesCmd* cmd = commands->NextCommand<ExecuteBundlesCmd>();
@@ -168,23 +179,28 @@ namespace dawn_native {
commandBufferState = CommandBufferStateTracker{};
}
- } break;
+ break;
+ }
case Command::SetStencilReference: {
commands->NextCommand<SetStencilReferenceCmd>();
- } break;
+ break;
+ }
case Command::SetBlendColor: {
commands->NextCommand<SetBlendColorCmd>();
- } break;
+ break;
+ }
case Command::SetViewport: {
commands->NextCommand<SetViewportCmd>();
- } break;
+ break;
+ }
case Command::SetScissorRect: {
commands->NextCommand<SetScissorRectCmd>();
- } break;
+ break;
+ }
default:
DAWN_TRY(ValidateRenderBundleCommand(
@@ -208,40 +224,46 @@ namespace dawn_native {
commands->NextCommand<EndComputePassCmd>();
DAWN_TRY(ValidateFinalDebugGroupStackSize(debugGroupStackSize));
return {};
- } break;
+ }
case Command::Dispatch: {
commands->NextCommand<DispatchCmd>();
DAWN_TRY(commandBufferState.ValidateCanDispatch());
- } break;
+ break;
+ }
case Command::DispatchIndirect: {
commands->NextCommand<DispatchIndirectCmd>();
DAWN_TRY(commandBufferState.ValidateCanDispatch());
- } break;
+ break;
+ }
case Command::InsertDebugMarker: {
InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
commands->NextData<char>(cmd->length + 1);
- } break;
+ break;
+ }
case Command::PopDebugGroup: {
commands->NextCommand<PopDebugGroupCmd>();
DAWN_TRY(ValidateCanPopDebugGroup(debugGroupStackSize));
debugGroupStackSize--;
- } break;
+ break;
+ }
case Command::PushDebugGroup: {
PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
commands->NextData<char>(cmd->length + 1);
debugGroupStackSize++;
- } break;
+ break;
+ }
case Command::SetComputePipeline: {
SetComputePipelineCmd* cmd = commands->NextCommand<SetComputePipelineCmd>();
ComputePipelineBase* pipeline = cmd->pipeline.Get();
commandBufferState.SetComputePipeline(pipeline);
- } break;
+ break;
+ }
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
@@ -249,7 +271,8 @@ namespace dawn_native {
commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
commandBufferState.SetBindGroup(cmd->index, cmd->group.Get());
- } break;
+ break;
+ }
default:
return DAWN_VALIDATION_ERROR("Command disallowed inside a compute pass");
diff --git a/chromium/third_party/dawn/src/dawn_native/Commands.cpp b/chromium/third_party/dawn/src/dawn_native/Commands.cpp
index 3810089066e..b4098108a32 100644
--- a/chromium/third_party/dawn/src/dawn_native/Commands.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Commands.cpp
@@ -33,60 +33,74 @@ namespace dawn_native {
case Command::BeginComputePass: {
BeginComputePassCmd* begin = commands->NextCommand<BeginComputePassCmd>();
begin->~BeginComputePassCmd();
- } break;
+ break;
+ }
case Command::BeginRenderPass: {
BeginRenderPassCmd* begin = commands->NextCommand<BeginRenderPassCmd>();
begin->~BeginRenderPassCmd();
- } break;
+ break;
+ }
case Command::CopyBufferToBuffer: {
CopyBufferToBufferCmd* copy = commands->NextCommand<CopyBufferToBufferCmd>();
copy->~CopyBufferToBufferCmd();
- } break;
+ break;
+ }
case Command::CopyBufferToTexture: {
CopyBufferToTextureCmd* copy = commands->NextCommand<CopyBufferToTextureCmd>();
copy->~CopyBufferToTextureCmd();
- } break;
+ break;
+ }
case Command::CopyTextureToBuffer: {
CopyTextureToBufferCmd* copy = commands->NextCommand<CopyTextureToBufferCmd>();
copy->~CopyTextureToBufferCmd();
- } break;
+ break;
+ }
case Command::CopyTextureToTexture: {
CopyTextureToTextureCmd* copy =
commands->NextCommand<CopyTextureToTextureCmd>();
copy->~CopyTextureToTextureCmd();
- } break;
+ break;
+ }
case Command::Dispatch: {
DispatchCmd* dispatch = commands->NextCommand<DispatchCmd>();
dispatch->~DispatchCmd();
- } break;
+ break;
+ }
case Command::DispatchIndirect: {
DispatchIndirectCmd* dispatch = commands->NextCommand<DispatchIndirectCmd>();
dispatch->~DispatchIndirectCmd();
- } break;
+ break;
+ }
case Command::Draw: {
DrawCmd* draw = commands->NextCommand<DrawCmd>();
draw->~DrawCmd();
- } break;
+ break;
+ }
case Command::DrawIndexed: {
DrawIndexedCmd* draw = commands->NextCommand<DrawIndexedCmd>();
draw->~DrawIndexedCmd();
- } break;
+ break;
+ }
case Command::DrawIndirect: {
DrawIndirectCmd* draw = commands->NextCommand<DrawIndirectCmd>();
draw->~DrawIndirectCmd();
- } break;
+ break;
+ }
case Command::DrawIndexedIndirect: {
DrawIndexedIndirectCmd* draw = commands->NextCommand<DrawIndexedIndirectCmd>();
draw->~DrawIndexedIndirectCmd();
- } break;
+ break;
+ }
case Command::EndComputePass: {
EndComputePassCmd* cmd = commands->NextCommand<EndComputePassCmd>();
cmd->~EndComputePassCmd();
- } break;
+ break;
+ }
case Command::EndRenderPass: {
EndRenderPassCmd* cmd = commands->NextCommand<EndRenderPassCmd>();
cmd->~EndRenderPassCmd();
- } break;
+ break;
+ }
case Command::ExecuteBundles: {
ExecuteBundlesCmd* cmd = commands->NextCommand<ExecuteBundlesCmd>();
auto bundles = commands->NextData<Ref<RenderBundleBase>>(cmd->count);
@@ -94,60 +108,73 @@ namespace dawn_native {
(&bundles[i])->~Ref<RenderBundleBase>();
}
cmd->~ExecuteBundlesCmd();
- } break;
+ break;
+ }
case Command::InsertDebugMarker: {
InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
commands->NextData<char>(cmd->length + 1);
cmd->~InsertDebugMarkerCmd();
- } break;
+ break;
+ }
case Command::PopDebugGroup: {
PopDebugGroupCmd* cmd = commands->NextCommand<PopDebugGroupCmd>();
cmd->~PopDebugGroupCmd();
- } break;
+ break;
+ }
case Command::PushDebugGroup: {
PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
commands->NextData<char>(cmd->length + 1);
cmd->~PushDebugGroupCmd();
- } break;
+ break;
+ }
case Command::SetComputePipeline: {
SetComputePipelineCmd* cmd = commands->NextCommand<SetComputePipelineCmd>();
cmd->~SetComputePipelineCmd();
- } break;
+ break;
+ }
case Command::SetRenderPipeline: {
SetRenderPipelineCmd* cmd = commands->NextCommand<SetRenderPipelineCmd>();
cmd->~SetRenderPipelineCmd();
- } break;
+ break;
+ }
case Command::SetStencilReference: {
SetStencilReferenceCmd* cmd = commands->NextCommand<SetStencilReferenceCmd>();
cmd->~SetStencilReferenceCmd();
- } break;
+ break;
+ }
case Command::SetViewport: {
SetViewportCmd* cmd = commands->NextCommand<SetViewportCmd>();
cmd->~SetViewportCmd();
- } break;
+ break;
+ }
case Command::SetScissorRect: {
SetScissorRectCmd* cmd = commands->NextCommand<SetScissorRectCmd>();
cmd->~SetScissorRectCmd();
- } break;
+ break;
+ }
case Command::SetBlendColor: {
SetBlendColorCmd* cmd = commands->NextCommand<SetBlendColorCmd>();
cmd->~SetBlendColorCmd();
- } break;
+ break;
+ }
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
if (cmd->dynamicOffsetCount > 0) {
commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
cmd->~SetBindGroupCmd();
- } break;
+ break;
+ }
case Command::SetIndexBuffer: {
SetIndexBufferCmd* cmd = commands->NextCommand<SetIndexBufferCmd>();
cmd->~SetIndexBufferCmd();
- } break;
+ break;
+ }
case Command::SetVertexBuffer: {
SetVertexBufferCmd* cmd = commands->NextCommand<SetVertexBufferCmd>();
cmd->~SetVertexBufferCmd();
- } break;
+ break;
+ }
}
}
commands->DataWasDestroyed();
@@ -214,12 +241,14 @@ namespace dawn_native {
case Command::ExecuteBundles: {
auto* cmd = commands->NextCommand<ExecuteBundlesCmd>();
commands->NextData<Ref<RenderBundleBase>>(cmd->count);
- } break;
+ break;
+ }
case Command::InsertDebugMarker: {
InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
commands->NextData<char>(cmd->length + 1);
- } break;
+ break;
+ }
case Command::PopDebugGroup:
commands->NextCommand<PopDebugGroupCmd>();
@@ -228,7 +257,8 @@ namespace dawn_native {
case Command::PushDebugGroup: {
PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
commands->NextData<char>(cmd->length + 1);
- } break;
+ break;
+ }
case Command::SetComputePipeline:
commands->NextCommand<SetComputePipelineCmd>();
@@ -259,7 +289,8 @@ namespace dawn_native {
if (cmd->dynamicOffsetCount > 0) {
commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
- } break;
+ break;
+ }
case Command::SetIndexBuffer:
commands->NextCommand<SetIndexBufferCmd>();
@@ -267,7 +298,8 @@ namespace dawn_native {
case Command::SetVertexBuffer: {
commands->NextCommand<SetVertexBufferCmd>();
- } break;
+ break;
+ }
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
index 45fbb55ad9e..e4f7ab410e6 100644
--- a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/DawnNative.h"
#include "dawn_native/Device.h"
#include "dawn_native/Instance.h"
+#include "dawn_native/Texture.h"
#include "dawn_platform/DawnPlatform.h"
// Contains the entry-points into dawn_native
@@ -44,12 +45,44 @@ namespace dawn_native {
mImpl = nullptr;
}
+ void Adapter::GetProperties(wgpu::AdapterProperties* properties) const {
+ properties->backendType = mImpl->GetBackendType();
+ properties->adapterType = mImpl->GetAdapterType();
+ properties->deviceID = mImpl->GetPCIInfo().deviceId;
+ properties->vendorID = mImpl->GetPCIInfo().vendorId;
+ properties->name = mImpl->GetPCIInfo().name.c_str();
+ }
+
BackendType Adapter::GetBackendType() const {
- return mImpl->GetBackendType();
+ switch (mImpl->GetBackendType()) {
+ case wgpu::BackendType::D3D12:
+ return BackendType::D3D12;
+ case wgpu::BackendType::Metal:
+ return BackendType::Metal;
+ case wgpu::BackendType::Null:
+ return BackendType::Null;
+ case wgpu::BackendType::OpenGL:
+ return BackendType::OpenGL;
+ case wgpu::BackendType::Vulkan:
+ return BackendType::Vulkan;
+ default:
+ UNREACHABLE();
+ }
}
DeviceType Adapter::GetDeviceType() const {
- return mImpl->GetDeviceType();
+ switch (mImpl->GetAdapterType()) {
+ case wgpu::AdapterType::DiscreteGPU:
+ return DeviceType::DiscreteGPU;
+ case wgpu::AdapterType::IntegratedGPU:
+ return DeviceType::IntegratedGPU;
+ case wgpu::AdapterType::CPU:
+ return DeviceType::CPU;
+ case wgpu::AdapterType::Unknown:
+ return DeviceType::Unknown;
+ default:
+ UNREACHABLE();
+ }
}
const PCIInfo& Adapter::GetPCIInfo() const {
@@ -75,17 +108,20 @@ namespace dawn_native {
// AdapterDiscoverOptionsBase
- AdapterDiscoveryOptionsBase::AdapterDiscoveryOptionsBase(BackendType type) : backendType(type) {
+ AdapterDiscoveryOptionsBase::AdapterDiscoveryOptionsBase(WGPUBackendType type)
+ : backendType(type) {
}
// Instance
- Instance::Instance() : mImpl(new InstanceBase()) {
+ Instance::Instance() : mImpl(InstanceBase::Create()) {
}
Instance::~Instance() {
- delete mImpl;
- mImpl = nullptr;
+ if (mImpl != nullptr) {
+ mImpl->Release();
+ mImpl = nullptr;
+ }
}
void Instance::DiscoverDefaultAdapters() {
@@ -113,24 +149,16 @@ namespace dawn_native {
mImpl->EnableBackendValidation(enableBackendValidation);
}
- bool Instance::IsBackendValidationEnabled() const {
- return mImpl->IsBackendValidationEnabled();
- }
-
void Instance::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
mImpl->EnableBeginCaptureOnStartup(beginCaptureOnStartup);
}
- bool Instance::IsBeginCaptureOnStartupEnabled() const {
- return mImpl->IsBeginCaptureOnStartupEnabled();
- }
-
void Instance::SetPlatform(dawn_platform::Platform* platform) {
mImpl->SetPlatform(platform);
}
- dawn_platform::Platform* Instance::GetPlatform() const {
- return mImpl->GetPlatform();
+ WGPUInstance Instance::Get() const {
+ return reinterpret_cast<WGPUInstance>(mImpl);
}
size_t GetLazyClearCountForTesting(WGPUDevice device) {
@@ -138,10 +166,25 @@ namespace dawn_native {
return deviceBase->GetLazyClearCountForTesting();
}
+ bool IsTextureSubresourceInitialized(WGPUTexture texture,
+ uint32_t baseMipLevel,
+ uint32_t levelCount,
+ uint32_t baseArrayLayer,
+ uint32_t layerCount) {
+ dawn_native::TextureBase* textureBase =
+ reinterpret_cast<dawn_native::TextureBase*>(texture);
+ return textureBase->IsSubresourceContentInitialized(baseMipLevel, levelCount,
+ baseArrayLayer, layerCount);
+ }
+
std::vector<const char*> GetProcMapNamesForTestingInternal();
std::vector<const char*> GetProcMapNamesForTesting() {
return GetProcMapNamesForTestingInternal();
}
+ ExternalImageDescriptor::ExternalImageDescriptor(ExternalImageDescriptorType type)
+ : type(type) {
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.cpp b/chromium/third_party/dawn/src/dawn_native/Device.cpp
index 664b81d5912..fa3cd9b7a50 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Device.cpp
@@ -35,6 +35,7 @@
#include "dawn_native/RenderPipeline.h"
#include "dawn_native/Sampler.h"
#include "dawn_native/ShaderModule.h"
+#include "dawn_native/Surface.h"
#include "dawn_native/SwapChain.h"
#include "dawn_native/Texture.h"
#include "dawn_native/ValidationUtils_autogen.h"
@@ -94,31 +95,71 @@ namespace dawn_native {
ASSERT(mCaches->shaderModules.empty());
}
- void DeviceBase::HandleError(wgpu::ErrorType type, const char* message) {
- mCurrentErrorScope->HandleError(type, message);
+ void DeviceBase::BaseDestructor() {
+ if (mLossStatus != LossStatus::Alive) {
+ // if device is already lost, we may still have fences and error scopes to clear since
+ // the time the device was lost, clear them now before we destruct the device.
+ mErrorScopeTracker->Tick(GetCompletedCommandSerial());
+ mFenceSignalTracker->Tick(GetCompletedCommandSerial());
+ return;
+ }
+ // Assert that errors are device loss so that we can continue with destruction
+ AssertAndIgnoreDeviceLossError(WaitForIdleForDestruction());
+ Destroy();
+ mLossStatus = LossStatus::AlreadyLost;
+ }
+
+ void DeviceBase::HandleError(InternalErrorType type, const char* message) {
+ // If we receive an internal error, assume the backend can't recover and proceed with
+ // device destruction. We first wait for all previous commands to be completed so that
+ // backend objects can be freed immediately, before handling the loss.
+ if (type == InternalErrorType::Internal) {
+ mLossStatus = LossStatus::BeingLost;
+ // Assert that errors are device loss so that we can continue with destruction.
+ AssertAndIgnoreDeviceLossError(WaitForIdleForDestruction());
+ HandleLoss(message);
+ }
+
+ // The device was lost for real, call the loss handler because all the backend objects are
+ // as if no longer in use.
+ if (type == InternalErrorType::DeviceLost) {
+ HandleLoss(message);
+ }
+
+ // Still forward device loss and internal errors to the error scopes so they all reject.
+ mCurrentErrorScope->HandleError(ToWGPUErrorType(type), message);
}
void DeviceBase::InjectError(wgpu::ErrorType type, const char* message) {
if (ConsumedError(ValidateErrorType(type))) {
return;
}
- if (DAWN_UNLIKELY(type == wgpu::ErrorType::NoError)) {
- HandleError(wgpu::ErrorType::Validation, "Invalid injected error NoError");
+
+ // This method should only be used to make error scope reject. For DeviceLost there is the
+ // LoseForTesting function that can be used instead.
+ if (type != wgpu::ErrorType::Validation && type != wgpu::ErrorType::OutOfMemory) {
+ HandleError(InternalErrorType::Validation,
+ "Invalid injected error, must be Validation or OutOfMemory");
return;
}
- HandleError(type, message);
+
+ HandleError(FromWGPUErrorType(type), message);
}
- void DeviceBase::ConsumeError(ErrorData* error) {
+ void DeviceBase::ConsumeError(std::unique_ptr<ErrorData> error) {
ASSERT(error != nullptr);
HandleError(error->GetType(), error->GetMessage().c_str());
- delete error;
}
void DeviceBase::SetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata) {
mRootErrorScope->SetCallback(callback, userdata);
}
+ void DeviceBase::SetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata) {
+ mDeviceLostCallback = callback;
+ mDeviceLostUserdata = userdata;
+ }
+
void DeviceBase::PushErrorScope(wgpu::ErrorFilter filter) {
if (ConsumedError(ValidateErrorFilter(filter))) {
return;
@@ -152,6 +193,38 @@ namespace dawn_native {
return {};
}
+ MaybeError DeviceBase::ValidateIsAlive() const {
+ if (DAWN_LIKELY(mLossStatus == LossStatus::Alive)) {
+ return {};
+ }
+ return DAWN_DEVICE_LOST_ERROR("Device is lost");
+ }
+
+ void DeviceBase::HandleLoss(const char* message) {
+ if (mLossStatus == LossStatus::AlreadyLost) {
+ return;
+ }
+
+ Destroy();
+ mLossStatus = LossStatus::AlreadyLost;
+
+ if (mDeviceLostCallback) {
+ mDeviceLostCallback(message, mDeviceLostUserdata);
+ }
+ }
+
+ void DeviceBase::LoseForTesting() {
+ if (mLossStatus == LossStatus::AlreadyLost) {
+ return;
+ }
+
+ HandleError(InternalErrorType::Internal, "Device lost for testing");
+ }
+
+ bool DeviceBase::IsLost() const {
+ return mLossStatus != LossStatus::Alive;
+ }
+
AdapterBase* DeviceBase::GetAdapter() const {
return mAdapter;
}
@@ -432,7 +505,9 @@ namespace dawn_native {
WGPUCreateBufferMappedResult result = CreateBufferMapped(descriptor);
WGPUBufferMapAsyncStatus status = WGPUBufferMapAsyncStatus_Success;
- if (result.data == nullptr || result.dataLength != descriptor->size) {
+ if (IsLost()) {
+ status = WGPUBufferMapAsyncStatus_DeviceLost;
+ } else if (result.data == nullptr || result.dataLength != descriptor->size) {
status = WGPUBufferMapAsyncStatus_Error;
}
@@ -472,10 +547,7 @@ namespace dawn_native {
QueueBase* result = nullptr;
if (ConsumedError(CreateQueueInternal(&result))) {
- // If queue creation failure ever becomes possible, we should implement MakeError and
- // friends for them.
- UNREACHABLE();
- return nullptr;
+ return QueueBase::MakeError(this);
}
return result;
@@ -518,10 +590,11 @@ namespace dawn_native {
return result;
}
- SwapChainBase* DeviceBase::CreateSwapChain(const SwapChainDescriptor* descriptor) {
+ SwapChainBase* DeviceBase::CreateSwapChain(Surface* surface,
+ const SwapChainDescriptor* descriptor) {
SwapChainBase* result = nullptr;
- if (ConsumedError(CreateSwapChainInternal(&result, descriptor))) {
+ if (ConsumedError(CreateSwapChainInternal(&result, surface, descriptor))) {
return SwapChainBase::MakeError(this);
}
@@ -550,15 +623,21 @@ namespace dawn_native {
// Other Device API methods
void DeviceBase::Tick() {
- if (ConsumedError(TickImpl()))
- return;
-
+ // We need to do the deferred callback even if Device is lost since Buffer Map Async will
+ // send callback with device lost status when device is lost.
{
auto deferredResults = std::move(mDeferredCreateBufferMappedAsyncResults);
for (const auto& deferred : deferredResults) {
deferred.callback(deferred.status, deferred.result, deferred.userdata);
}
}
+ if (ConsumedError(ValidateIsAlive())) {
+ return;
+ }
+ if (ConsumedError(TickImpl())) {
+ return;
+ }
+
mErrorScopeTracker->Tick(GetCompletedCommandSerial());
mFenceSignalTracker->Tick(GetCompletedCommandSerial());
}
@@ -632,12 +711,14 @@ namespace dawn_native {
void DeviceBase::SetDefaultToggles() {
// Sets the default-enabled toggles
mTogglesSet.SetToggle(Toggle::LazyClearResourceOnFirstUse, true);
+ mTogglesSet.SetToggle(Toggle::UseSpvc, false);
}
// Implementation details of object creation
MaybeError DeviceBase::CreateBindGroupInternal(BindGroupBase** result,
const BindGroupDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateBindGroupDescriptor(this, descriptor));
}
@@ -648,6 +729,7 @@ namespace dawn_native {
MaybeError DeviceBase::CreateBindGroupLayoutInternal(
BindGroupLayoutBase** result,
const BindGroupLayoutDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateBindGroupLayoutDescriptor(this, descriptor));
}
@@ -657,6 +739,7 @@ namespace dawn_native {
MaybeError DeviceBase::CreateBufferInternal(BufferBase** result,
const BufferDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateBufferDescriptor(this, descriptor));
}
@@ -667,6 +750,7 @@ namespace dawn_native {
MaybeError DeviceBase::CreateComputePipelineInternal(
ComputePipelineBase** result,
const ComputePipelineDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
}
@@ -691,6 +775,7 @@ namespace dawn_native {
MaybeError DeviceBase::CreatePipelineLayoutInternal(
PipelineLayoutBase** result,
const PipelineLayoutDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidatePipelineLayoutDescriptor(this, descriptor));
}
@@ -699,6 +784,7 @@ namespace dawn_native {
}
MaybeError DeviceBase::CreateQueueInternal(QueueBase** result) {
+ DAWN_TRY(ValidateIsAlive());
DAWN_TRY_ASSIGN(*result, CreateQueueImpl());
return {};
}
@@ -706,6 +792,7 @@ namespace dawn_native {
MaybeError DeviceBase::CreateRenderBundleEncoderInternal(
RenderBundleEncoder** result,
const RenderBundleEncoderDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateRenderBundleEncoderDescriptor(this, descriptor));
}
@@ -716,6 +803,7 @@ namespace dawn_native {
MaybeError DeviceBase::CreateRenderPipelineInternal(
RenderPipelineBase** result,
const RenderPipelineDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
}
@@ -748,6 +836,7 @@ namespace dawn_native {
MaybeError DeviceBase::CreateSamplerInternal(SamplerBase** result,
const SamplerDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateSamplerDescriptor(this, descriptor));
}
@@ -757,6 +846,7 @@ namespace dawn_native {
MaybeError DeviceBase::CreateShaderModuleInternal(ShaderModuleBase** result,
const ShaderModuleDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateShaderModuleDescriptor(this, descriptor));
}
@@ -765,16 +855,37 @@ namespace dawn_native {
}
MaybeError DeviceBase::CreateSwapChainInternal(SwapChainBase** result,
+ Surface* surface,
const SwapChainDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
- DAWN_TRY(ValidateSwapChainDescriptor(this, descriptor));
+ DAWN_TRY(ValidateSwapChainDescriptor(this, surface, descriptor));
+ }
+
+ if (surface == nullptr) {
+ DAWN_TRY_ASSIGN(*result, CreateSwapChainImpl(descriptor));
+ } else {
+ ASSERT(descriptor->implementation == 0);
+
+ NewSwapChainBase* previousSwapChain = surface->GetAttachedSwapChain();
+ NewSwapChainBase* newSwapChain;
+ DAWN_TRY_ASSIGN(newSwapChain,
+ CreateSwapChainImpl(surface, previousSwapChain, descriptor));
+
+ if (previousSwapChain != nullptr) {
+ ASSERT(!previousSwapChain->IsAttached());
+ }
+ ASSERT(newSwapChain->IsAttached());
+
+ surface->SetAttachedSwapChain(newSwapChain);
+ *result = newSwapChain;
}
- DAWN_TRY_ASSIGN(*result, CreateSwapChainImpl(descriptor));
return {};
}
MaybeError DeviceBase::CreateTextureInternal(TextureBase** result,
const TextureDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateTextureDescriptor(this, descriptor));
}
@@ -785,6 +896,7 @@ namespace dawn_native {
MaybeError DeviceBase::CreateTextureViewInternal(TextureViewBase** result,
TextureBase* texture,
const TextureViewDescriptor* descriptor) {
+ DAWN_TRY(ValidateIsAlive());
DAWN_TRY(ValidateObject(texture));
TextureViewDescriptor desc = GetTextureViewDescriptorWithDefaults(texture, descriptor);
if (IsValidationEnabled()) {
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.h b/chromium/third_party/dawn/src/dawn_native/Device.h
index e887fa7c1a3..e967f3e0764 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.h
+++ b/chromium/third_party/dawn/src/dawn_native/Device.h
@@ -29,16 +29,14 @@
#include <memory>
namespace dawn_native {
-
- using ErrorCallback = void (*)(const char* errorMessage, void* userData);
-
class AdapterBase;
class AttachmentState;
class AttachmentStateBlueprint;
+ class BindGroupLayoutBase;
+ class DynamicUploader;
class ErrorScope;
class ErrorScopeTracker;
class FenceSignalTracker;
- class DynamicUploader;
class StagingBufferBase;
class DeviceBase {
@@ -46,7 +44,7 @@ namespace dawn_native {
DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor);
virtual ~DeviceBase();
- void HandleError(wgpu::ErrorType type, const char* message);
+ void HandleError(InternalErrorType type, const char* message);
bool ConsumedError(MaybeError maybeError) {
if (DAWN_UNLIKELY(maybeError.IsError())) {
@@ -154,7 +152,7 @@ namespace dawn_native {
RenderPipelineBase* CreateRenderPipeline(const RenderPipelineDescriptor* descriptor);
SamplerBase* CreateSampler(const SamplerDescriptor* descriptor);
ShaderModuleBase* CreateShaderModule(const ShaderModuleDescriptor* descriptor);
- SwapChainBase* CreateSwapChain(const SwapChainDescriptor* descriptor);
+ SwapChainBase* CreateSwapChain(Surface* surface, const SwapChainDescriptor* descriptor);
TextureBase* CreateTexture(const TextureDescriptor* descriptor);
TextureViewBase* CreateTextureView(TextureBase* texture,
const TextureViewDescriptor* descriptor);
@@ -163,9 +161,13 @@ namespace dawn_native {
void Tick();
+ void SetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata);
void SetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata);
void PushErrorScope(wgpu::ErrorFilter filter);
bool PopErrorScope(wgpu::ErrorCallback callback, void* userdata);
+
+ MaybeError ValidateIsAlive() const;
+
ErrorScope* GetCurrentErrorScope();
void Reference();
@@ -188,12 +190,22 @@ namespace dawn_native {
bool IsValidationEnabled() const;
size_t GetLazyClearCountForTesting();
void IncrementLazyClearCountForTesting();
+ void LoseForTesting();
+ bool IsLost() const;
protected:
void SetToggle(Toggle toggle, bool isEnabled);
void ApplyToggleOverrides(const DeviceDescriptor* deviceDescriptor);
+ void BaseDestructor();
std::unique_ptr<DynamicUploader> mDynamicUploader;
+ // LossStatus::Alive means the device is alive and can be used normally.
+ // LossStatus::BeingLost means the device is in the process of being lost and should not
+ // accept any new commands.
+ // LossStatus::AlreadyLost means the device has been lost and can no longer be used,
+ // all resources have been freed.
+ enum class LossStatus { Alive, BeingLost, AlreadyLost };
+ LossStatus mLossStatus = LossStatus::Alive;
private:
virtual ResultOrError<BindGroupBase*> CreateBindGroupImpl(
@@ -214,6 +226,11 @@ namespace dawn_native {
const ShaderModuleDescriptor* descriptor) = 0;
virtual ResultOrError<SwapChainBase*> CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) = 0;
+ // Note that previousSwapChain may be nullptr, or come from a different backend.
+ virtual ResultOrError<NewSwapChainBase*> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) = 0;
virtual ResultOrError<TextureBase*> CreateTextureImpl(
const TextureDescriptor* descriptor) = 0;
virtual ResultOrError<TextureViewBase*> CreateTextureViewImpl(
@@ -239,6 +256,7 @@ namespace dawn_native {
MaybeError CreateShaderModuleInternal(ShaderModuleBase** result,
const ShaderModuleDescriptor* descriptor);
MaybeError CreateSwapChainInternal(SwapChainBase** result,
+ Surface* surface,
const SwapChainDescriptor* descriptor);
MaybeError CreateTextureInternal(TextureBase** result, const TextureDescriptor* descriptor);
MaybeError CreateTextureViewInternal(TextureViewBase** result,
@@ -249,7 +267,21 @@ namespace dawn_native {
void SetDefaultToggles();
- void ConsumeError(ErrorData* error);
+ void ConsumeError(std::unique_ptr<ErrorData> error);
+
+ // Destroy is used to clean up and release resources used by device, does not wait for GPU
+ // or check errors.
+ virtual void Destroy() = 0;
+
+ // WaitForIdleForDestruction waits for GPU to finish, checks errors and gets ready for
+ // destruction. This is only used when properly destructing the device. For a real
+ // device loss, this function doesn't need to be called since the driver already closed all
+ // resources.
+ virtual MaybeError WaitForIdleForDestruction() = 0;
+
+ void HandleLoss(const char* message);
+ wgpu::DeviceLostCallback mDeviceLostCallback = nullptr;
+ void* mDeviceLostUserdata;
AdapterBase* mAdapter = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp b/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp
index b8be06990d6..8ecf7b26f9b 100644
--- a/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp
@@ -53,7 +53,7 @@ namespace dawn_native {
}
}
- void EncodingContext::HandleError(wgpu::ErrorType type, const char* message) {
+ void EncodingContext::HandleError(InternalErrorType type, const char* message) {
if (!IsFinished()) {
// If the encoding context is not finished, errors are deferred until
// Finish() is called.
diff --git a/chromium/third_party/dawn/src/dawn_native/EncodingContext.h b/chromium/third_party/dawn/src/dawn_native/EncodingContext.h
index c16d544c0fc..3142bd092c5 100644
--- a/chromium/third_party/dawn/src/dawn_native/EncodingContext.h
+++ b/chromium/third_party/dawn/src/dawn_native/EncodingContext.h
@@ -39,11 +39,10 @@ namespace dawn_native {
CommandIterator* GetIterator();
// Functions to handle encoder errors
- void HandleError(wgpu::ErrorType type, const char* message);
+ void HandleError(InternalErrorType type, const char* message);
- inline void ConsumeError(ErrorData* error) {
+ inline void ConsumeError(std::unique_ptr<ErrorData> error) {
HandleError(error->GetType(), error->GetMessage().c_str());
- delete error;
}
inline bool ConsumedError(MaybeError maybeError) {
@@ -59,10 +58,10 @@ namespace dawn_native {
if (DAWN_UNLIKELY(encoder != mCurrentEncoder)) {
if (mCurrentEncoder != mTopLevelEncoder) {
// The top level encoder was used when a pass encoder was current.
- HandleError(wgpu::ErrorType::Validation,
+ HandleError(InternalErrorType::Validation,
"Command cannot be recorded inside a pass");
} else {
- HandleError(wgpu::ErrorType::Validation,
+ HandleError(InternalErrorType::Validation,
"Recording in an error or already ended pass encoder");
}
return false;
diff --git a/chromium/third_party/dawn/src/dawn_native/Error.cpp b/chromium/third_party/dawn/src/dawn_native/Error.cpp
index 195afb2e024..13db32ecf7c 100644
--- a/chromium/third_party/dawn/src/dawn_native/Error.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Error.cpp
@@ -15,21 +15,46 @@
#include "dawn_native/Error.h"
#include "dawn_native/ErrorData.h"
+#include "dawn_native/dawn_platform.h"
namespace dawn_native {
- ErrorData* MakeError(InternalErrorType type,
- std::string message,
- const char* file,
- const char* function,
- int line) {
- ErrorData* error = new ErrorData(type, message);
- error->AppendBacktrace(file, function, line);
- return error;
+ void AssertAndIgnoreDeviceLossError(MaybeError maybeError) {
+ if (maybeError.IsError()) {
+ std::unique_ptr<ErrorData> errorData = maybeError.AcquireError();
+ ASSERT(errorData->GetType() == InternalErrorType::DeviceLost);
+ }
}
- void AppendBacktrace(ErrorData* error, const char* file, const char* function, int line) {
- error->AppendBacktrace(file, function, line);
+ wgpu::ErrorType ToWGPUErrorType(InternalErrorType type) {
+ switch (type) {
+ case InternalErrorType::Validation:
+ return wgpu::ErrorType::Validation;
+ case InternalErrorType::OutOfMemory:
+ return wgpu::ErrorType::OutOfMemory;
+
+ // There is no equivalent of Internal errors in the WebGPU API. Internal errors cause
+ // the device at the API level to be lost, so treat it like a DeviceLost error.
+ case InternalErrorType::Internal:
+ case InternalErrorType::DeviceLost:
+ return wgpu::ErrorType::DeviceLost;
+
+ default:
+ return wgpu::ErrorType::Unknown;
+ }
+ }
+
+ InternalErrorType FromWGPUErrorType(wgpu::ErrorType type) {
+ switch (type) {
+ case wgpu::ErrorType::Validation:
+ return InternalErrorType::Validation;
+ case wgpu::ErrorType::OutOfMemory:
+ return InternalErrorType::OutOfMemory;
+ case wgpu::ErrorType::DeviceLost:
+ return InternalErrorType::DeviceLost;
+ default:
+ return InternalErrorType::Internal;
+ }
}
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Error.h b/chromium/third_party/dawn/src/dawn_native/Error.h
index 172263e0ac2..41a1eff2267 100644
--- a/chromium/third_party/dawn/src/dawn_native/Error.h
+++ b/chromium/third_party/dawn/src/dawn_native/Error.h
@@ -16,23 +16,26 @@
#define DAWNNATIVE_ERROR_H_
#include "common/Result.h"
+#include "dawn_native/ErrorData.h"
#include <string>
namespace dawn_native {
- // This is the content of an error value for MaybeError or ResultOrError, split off to its own
- // file to avoid having all files including headers like <string> and <vector>
- class ErrorData;
-
- enum class InternalErrorType : uint32_t { Validation, DeviceLost, Unimplemented, OutOfMemory };
+ enum class InternalErrorType : uint32_t {
+ Validation,
+ DeviceLost,
+ Internal,
+ Unimplemented,
+ OutOfMemory
+ };
// MaybeError and ResultOrError are meant to be used as return value for function that are not
// expected to, but might fail. The handling of error is potentially much slower than successes.
- using MaybeError = Result<void, ErrorData*>;
+ using MaybeError = Result<void, ErrorData>;
template <typename T>
- using ResultOrError = Result<T, ErrorData*>;
+ using ResultOrError = Result<T, ErrorData>;
// Returning a success is done like so:
// return {}; // for Error
@@ -43,11 +46,37 @@ namespace dawn_native {
//
// but shorthand version for specific error types are preferred:
// return DAWN_VALIDATION_ERROR("My error message");
+ //
+ // There are different types of errors that should be used for different purpose:
+ //
+ // - Validation: these are errors that show the user did something bad, which causes the
+ // whole call to be a no-op. It's most commonly found in the frontend but there can be some
+ // backend specific validation in non-conformant backends too.
+ //
+ // - Out of memory: creation of a Buffer or Texture failed because there isn't enough memory.
+ // This is similar to validation errors in that the call becomes a no-op and returns an
+ // error object, but is reported separated from validation to the user.
+ //
+ // - Device loss: the backend driver reported that the GPU has been lost, which means all
+ // previous commands magically disappeared and the only thing left to do is clean up.
+ // Note: Device loss should be used rarely and in most case you want to use Internal
+ // instead.
+ //
+ // - Internal: something happened that the backend didn't expect, and it doesn't know
+ // how to recover from that situation. This causes the device to be lost, but is separate
+ // from device loss, because the GPU execution is still happening so we need to clean up
+ // more gracefully.
+ //
+ // - Unimplemented: same as Internal except it puts "unimplemented" in the error message for
+ // more clarity.
+
#define DAWN_MAKE_ERROR(TYPE, MESSAGE) \
- ::dawn_native::MakeError(TYPE, MESSAGE, __FILE__, __func__, __LINE__)
+ ::dawn_native::ErrorData::Create(TYPE, MESSAGE, __FILE__, __func__, __LINE__)
#define DAWN_VALIDATION_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::Validation, MESSAGE)
#define DAWN_DEVICE_LOST_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::DeviceLost, MESSAGE)
-#define DAWN_UNIMPLEMENTED_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::Unimplemented, MESSAGE)
+#define DAWN_INTERNAL_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::Internal, MESSAGE)
+#define DAWN_UNIMPLEMENTED_ERROR(MESSAGE) \
+ DAWN_MAKE_ERROR(InternalErrorType::Internal, std::string("Unimplemented: ") + MESSAGE)
#define DAWN_OUT_OF_MEMORY_ERROR(MESSAGE) DAWN_MAKE_ERROR(InternalErrorType::OutOfMemory, MESSAGE)
#define DAWN_CONCAT1(x, y) x##y
@@ -57,42 +86,38 @@ namespace dawn_native {
// When Errors aren't handled explicitly, calls to functions returning errors should be
// wrapped in an DAWN_TRY. It will return the error if any, otherwise keep executing
// the current function.
-#define DAWN_TRY(EXPR) \
- { \
- auto DAWN_LOCAL_VAR = EXPR; \
- if (DAWN_UNLIKELY(DAWN_LOCAL_VAR.IsError())) { \
- ErrorData* error = DAWN_LOCAL_VAR.AcquireError(); \
- ::dawn_native::AppendBacktrace(error, __FILE__, __func__, __LINE__); \
- return {std::move(error)}; \
- } \
- } \
- for (;;) \
+#define DAWN_TRY(EXPR) \
+ { \
+ auto DAWN_LOCAL_VAR = EXPR; \
+ if (DAWN_UNLIKELY(DAWN_LOCAL_VAR.IsError())) { \
+ std::unique_ptr<::dawn_native::ErrorData> error = DAWN_LOCAL_VAR.AcquireError(); \
+ error->AppendBacktrace(__FILE__, __func__, __LINE__); \
+ return {std::move(error)}; \
+ } \
+ } \
+ for (;;) \
break
// DAWN_TRY_ASSIGN is the same as DAWN_TRY for ResultOrError and assigns the success value, if
// any, to VAR.
-#define DAWN_TRY_ASSIGN(VAR, EXPR) \
- { \
- auto DAWN_LOCAL_VAR = EXPR; \
- if (DAWN_UNLIKELY(DAWN_LOCAL_VAR.IsError())) { \
- ErrorData* error = DAWN_LOCAL_VAR.AcquireError(); \
- ::dawn_native::AppendBacktrace(error, __FILE__, __func__, __LINE__); \
- return {std::move(error)}; \
- } \
- VAR = DAWN_LOCAL_VAR.AcquireSuccess(); \
- } \
- for (;;) \
+#define DAWN_TRY_ASSIGN(VAR, EXPR) \
+ { \
+ auto DAWN_LOCAL_VAR = EXPR; \
+ if (DAWN_UNLIKELY(DAWN_LOCAL_VAR.IsError())) { \
+ std::unique_ptr<ErrorData> error = DAWN_LOCAL_VAR.AcquireError(); \
+ error->AppendBacktrace(__FILE__, __func__, __LINE__); \
+ return {std::move(error)}; \
+ } \
+ VAR = DAWN_LOCAL_VAR.AcquireSuccess(); \
+ } \
+ for (;;) \
break
- // Implementation detail of DAWN_TRY and DAWN_TRY_ASSIGN's adding to the Error's backtrace.
- void AppendBacktrace(ErrorData* error, const char* file, const char* function, int line);
+ // Assert that errors are device loss so that we can continue with destruction
+ void AssertAndIgnoreDeviceLossError(MaybeError maybeError);
- // Implementation detail of DAWN_MAKE_ERROR
- ErrorData* MakeError(InternalErrorType type,
- std::string message,
- const char* file,
- const char* function,
- int line);
+ wgpu::ErrorType ToWGPUErrorType(InternalErrorType type);
+ InternalErrorType FromWGPUErrorType(wgpu::ErrorType type);
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp b/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp
index 2cd01da2773..41d0c297021 100644
--- a/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp
@@ -19,7 +19,15 @@
namespace dawn_native {
- ErrorData::ErrorData() = default;
+ std::unique_ptr<ErrorData> ErrorData::Create(InternalErrorType type,
+ std::string message,
+ const char* file,
+ const char* function,
+ int line) {
+ std::unique_ptr<ErrorData> error = std::make_unique<ErrorData>(type, message);
+ error->AppendBacktrace(file, function, line);
+ return error;
+ }
ErrorData::ErrorData(InternalErrorType type, std::string message)
: mType(type), mMessage(std::move(message)) {
@@ -34,23 +42,10 @@ namespace dawn_native {
mBacktrace.push_back(std::move(record));
}
- InternalErrorType ErrorData::GetInternalType() const {
+ InternalErrorType ErrorData::GetType() const {
return mType;
}
- wgpu::ErrorType ErrorData::GetType() const {
- switch (mType) {
- case InternalErrorType::Validation:
- return wgpu::ErrorType::Validation;
- case InternalErrorType::OutOfMemory:
- return wgpu::ErrorType::OutOfMemory;
- case InternalErrorType::DeviceLost:
- return wgpu::ErrorType::DeviceLost;
- default:
- return wgpu::ErrorType::Unknown;
- }
- }
-
const std::string& ErrorData::GetMessage() const {
return mMessage;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorData.h b/chromium/third_party/dawn/src/dawn_native/ErrorData.h
index a73d90dd234..02486020e07 100644
--- a/chromium/third_party/dawn/src/dawn_native/ErrorData.h
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorData.h
@@ -15,7 +15,10 @@
#ifndef DAWNNATIVE_ERRORDATA_H_
#define DAWNNATIVE_ERRORDATA_H_
+#include "common/Compiler.h"
+
#include <cstdint>
+#include <memory>
#include <string>
#include <vector>
@@ -28,12 +31,15 @@ namespace dawn {
}
namespace dawn_native {
-
enum class InternalErrorType : uint32_t;
- class ErrorData {
+ class DAWN_NO_DISCARD ErrorData {
public:
- ErrorData();
+ static DAWN_NO_DISCARD std::unique_ptr<ErrorData> Create(InternalErrorType type,
+ std::string message,
+ const char* file,
+ const char* function,
+ int line);
ErrorData(InternalErrorType type, std::string message);
struct BacktraceRecord {
@@ -43,8 +49,7 @@ namespace dawn_native {
};
void AppendBacktrace(const char* file, const char* function, int line);
- InternalErrorType GetInternalType() const;
- wgpu::ErrorType GetType() const;
+ InternalErrorType GetType() const;
const std::string& GetMessage() const;
const std::vector<BacktraceRecord>& GetBacktrace() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorInjector.cpp b/chromium/third_party/dawn/src/dawn_native/ErrorInjector.cpp
new file mode 100644
index 00000000000..836ef1ecab6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorInjector.cpp
@@ -0,0 +1,70 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/ErrorInjector.h"
+
+#include "common/Assert.h"
+#include "dawn_native/DawnNative.h"
+
+namespace dawn_native {
+
+ namespace {
+
+ bool sIsEnabled = false;
+ uint64_t sNextIndex = 0;
+ uint64_t sInjectedFailureIndex = 0;
+ bool sHasPendingInjectedError = false;
+
+ } // anonymous namespace
+
+ void EnableErrorInjector() {
+ sIsEnabled = true;
+ }
+
+ void DisableErrorInjector() {
+ sIsEnabled = false;
+ }
+
+ void ClearErrorInjector() {
+ sNextIndex = 0;
+ sHasPendingInjectedError = false;
+ }
+
+ bool ErrorInjectorEnabled() {
+ return sIsEnabled;
+ }
+
+ uint64_t AcquireErrorInjectorCallCount() {
+ uint64_t count = sNextIndex;
+ ClearErrorInjector();
+ return count;
+ }
+
+ bool ShouldInjectError() {
+ uint64_t index = sNextIndex++;
+ if (sHasPendingInjectedError && index == sInjectedFailureIndex) {
+ sHasPendingInjectedError = false;
+ return true;
+ }
+ return false;
+ }
+
+ void InjectErrorAt(uint64_t index) {
+ // Only one error can be injected at a time.
+ ASSERT(!sHasPendingInjectedError);
+ sInjectedFailureIndex = index;
+ sHasPendingInjectedError = true;
+ }
+
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorInjector.h b/chromium/third_party/dawn/src/dawn_native/ErrorInjector.h
new file mode 100644
index 00000000000..4d7d2b8a2b6
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorInjector.h
@@ -0,0 +1,68 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ERRORINJECTOR_H_
+#define DAWNNATIVE_ERRORINJECTOR_H_
+
+#include <stdint.h>
+#include <type_traits>
+
+namespace dawn_native {
+
+ template <typename ErrorType>
+ struct InjectedErrorResult {
+ ErrorType error;
+ bool injected;
+ };
+
+ bool ErrorInjectorEnabled();
+
+ bool ShouldInjectError();
+
+ template <typename ErrorType>
+ InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType) {
+ return InjectedErrorResult<ErrorType>{errorType, ShouldInjectError()};
+ }
+
+ template <typename ErrorType, typename... ErrorTypes>
+ InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType, ErrorTypes... errorTypes) {
+ if (ShouldInjectError()) {
+ return InjectedErrorResult<ErrorType>{errorType, true};
+ }
+ return MaybeInjectError(errorTypes...);
+ }
+
+} // namespace dawn_native
+
+#if defined(DAWN_ENABLE_ERROR_INJECTION)
+
+# define INJECT_ERROR_OR_RUN(stmt, ...) \
+ [&]() { \
+ if (DAWN_UNLIKELY(::dawn_native::ErrorInjectorEnabled())) { \
+ /* Only used for testing and fuzzing, so it's okay if this is deoptimized */ \
+ auto injectedError = ::dawn_native::MaybeInjectError(__VA_ARGS__); \
+ if (injectedError.injected) { \
+ return injectedError.error; \
+ } \
+ } \
+ return (stmt); \
+ }()
+
+#else
+
+# define INJECT_ERROR_OR_RUN(stmt, ...) stmt
+
+#endif
+
+#endif // DAWNNATIVE_ERRORINJECTOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Fence.cpp b/chromium/third_party/dawn/src/dawn_native/Fence.cpp
index 1ad89b95931..f16274e9d3a 100644
--- a/chromium/third_party/dawn/src/dawn_native/Fence.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Fence.cpp
@@ -66,8 +66,9 @@ namespace dawn_native {
void Fence::OnCompletion(uint64_t value,
wgpu::FenceOnCompletionCallback callback,
void* userdata) {
- if (GetDevice()->ConsumedError(ValidateOnCompletion(value))) {
- callback(WGPUFenceCompletionStatus_Error, userdata);
+ WGPUFenceCompletionStatus status;
+ if (GetDevice()->ConsumedError(ValidateOnCompletion(value, &status))) {
+ callback(status, userdata);
return;
}
ASSERT(!IsError());
@@ -106,16 +107,28 @@ namespace dawn_native {
mCompletedValue = completedValue;
for (auto& request : mRequests.IterateUpTo(mCompletedValue)) {
- request.completionCallback(WGPUFenceCompletionStatus_Success, request.userdata);
+ if (GetDevice()->IsLost()) {
+ request.completionCallback(WGPUFenceCompletionStatus_DeviceLost, request.userdata);
+ } else {
+ request.completionCallback(WGPUFenceCompletionStatus_Success, request.userdata);
+ }
}
mRequests.ClearUpTo(mCompletedValue);
}
- MaybeError Fence::ValidateOnCompletion(uint64_t value) const {
+ MaybeError Fence::ValidateOnCompletion(uint64_t value,
+ WGPUFenceCompletionStatus* status) const {
+ *status = WGPUFenceCompletionStatus_DeviceLost;
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+
+ *status = WGPUFenceCompletionStatus_Error;
DAWN_TRY(GetDevice()->ValidateObject(this));
+
if (value > mSignalValue) {
return DAWN_VALIDATION_ERROR("Value greater than fence signaled value");
}
+
+ *status = WGPUFenceCompletionStatus_Success;
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Fence.h b/chromium/third_party/dawn/src/dawn_native/Fence.h
index 1211ecbf8fc..a9845f9cb08 100644
--- a/chromium/third_party/dawn/src/dawn_native/Fence.h
+++ b/chromium/third_party/dawn/src/dawn_native/Fence.h
@@ -51,7 +51,7 @@ namespace dawn_native {
private:
Fence(DeviceBase* device, ObjectBase::ErrorTag tag);
- MaybeError ValidateOnCompletion(uint64_t value) const;
+ MaybeError ValidateOnCompletion(uint64_t value, WGPUFenceCompletionStatus* status) const;
struct OnCompletionData {
wgpu::FenceOnCompletionCallback completionCallback = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_native/Format.cpp b/chromium/third_party/dawn/src/dawn_native/Format.cpp
index c5537848a56..0bd879b1f9b 100644
--- a/chromium/third_party/dawn/src/dawn_native/Format.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Format.cpp
@@ -73,13 +73,13 @@ namespace dawn_native {
return aspect != Color;
}
- bool Format::HasComponentType(wgpu::TextureComponentType componentType) const {
+ bool Format::HasComponentType(Type componentType) const {
// Depth stencil textures need to be special cased but we don't support sampling them yet.
if (aspect != Color) {
return false;
}
- return TextureComponentTypeToFormatType(componentType) == type;
+ return componentType == type;
}
size_t Format::GetIndex() const {
@@ -118,12 +118,14 @@ namespace dawn_native {
};
auto AddColorFormat = [&AddFormat](wgpu::TextureFormat format, bool renderable,
- uint32_t byteSize, Type type) {
+ bool supportsStorageUsage, uint32_t byteSize,
+ Type type) {
Format internalFormat;
internalFormat.format = format;
internalFormat.isRenderable = renderable;
internalFormat.isCompressed = false;
internalFormat.isSupported = true;
+ internalFormat.supportsStorageUsage = supportsStorageUsage;
internalFormat.aspect = Aspect::Color;
internalFormat.type = type;
internalFormat.blockByteSize = byteSize;
@@ -139,6 +141,7 @@ namespace dawn_native {
internalFormat.isRenderable = true;
internalFormat.isCompressed = false;
internalFormat.isSupported = true;
+ internalFormat.supportsStorageUsage = false;
internalFormat.aspect = aspect;
internalFormat.type = Type::Other;
internalFormat.blockByteSize = byteSize;
@@ -154,6 +157,7 @@ namespace dawn_native {
internalFormat.isRenderable = false;
internalFormat.isCompressed = true;
internalFormat.isSupported = isSupported;
+ internalFormat.supportsStorageUsage = false;
internalFormat.aspect = Aspect::Color;
internalFormat.type = Type::Float;
internalFormat.blockByteSize = byteSize;
@@ -165,50 +169,50 @@ namespace dawn_native {
// clang-format off
// 1 byte color formats
- AddColorFormat(wgpu::TextureFormat::R8Unorm, true, 1, Type::Float);
- AddColorFormat(wgpu::TextureFormat::R8Snorm, false, 1, Type::Float);
- AddColorFormat(wgpu::TextureFormat::R8Uint, true, 1, Type::Uint);
- AddColorFormat(wgpu::TextureFormat::R8Sint, true, 1, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::R8Unorm, true, false, 1, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::R8Snorm, false, false, 1, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::R8Uint, true, false, 1, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::R8Sint, true, false, 1, Type::Sint);
// 2 bytes color formats
- AddColorFormat(wgpu::TextureFormat::R16Uint, true, 2, Type::Uint);
- AddColorFormat(wgpu::TextureFormat::R16Sint, true, 2, Type::Sint);
- AddColorFormat(wgpu::TextureFormat::R16Float, true, 2, Type::Float);
- AddColorFormat(wgpu::TextureFormat::RG8Unorm, true, 2, Type::Float);
- AddColorFormat(wgpu::TextureFormat::RG8Snorm, false, 2, Type::Float);
- AddColorFormat(wgpu::TextureFormat::RG8Uint, true, 2, Type::Uint);
- AddColorFormat(wgpu::TextureFormat::RG8Sint, true, 2, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::R16Uint, true, false, 2, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::R16Sint, true, false, 2, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::R16Float, true, false, 2, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RG8Unorm, true, false, 2, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RG8Snorm, false, false, 2, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RG8Uint, true, false, 2, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::RG8Sint, true, false, 2, Type::Sint);
// 4 bytes color formats
- AddColorFormat(wgpu::TextureFormat::R32Uint, true, 4, Type::Uint);
- AddColorFormat(wgpu::TextureFormat::R32Sint, true, 4, Type::Sint);
- AddColorFormat(wgpu::TextureFormat::R32Float, true, 4, Type::Float);
- AddColorFormat(wgpu::TextureFormat::RG16Uint, true, 4, Type::Uint);
- AddColorFormat(wgpu::TextureFormat::RG16Sint, true, 4, Type::Sint);
- AddColorFormat(wgpu::TextureFormat::RG16Float, true, 4, Type::Float);
- AddColorFormat(wgpu::TextureFormat::RGBA8Unorm, true, 4, Type::Float);
- AddColorFormat(wgpu::TextureFormat::RGBA8UnormSrgb, true, 4, Type::Float);
- AddColorFormat(wgpu::TextureFormat::RGBA8Snorm, false, 4, Type::Float);
- AddColorFormat(wgpu::TextureFormat::RGBA8Uint, true, 4, Type::Uint);
- AddColorFormat(wgpu::TextureFormat::RGBA8Sint, true, 4, Type::Sint);
- AddColorFormat(wgpu::TextureFormat::BGRA8Unorm, true, 4, Type::Float);
- AddColorFormat(wgpu::TextureFormat::BGRA8UnormSrgb, true, 4, Type::Float);
- AddColorFormat(wgpu::TextureFormat::RGB10A2Unorm, true, 4, Type::Float);
-
- AddColorFormat(wgpu::TextureFormat::RG11B10Float, false, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::R32Uint, true, true, 4, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::R32Sint, true, true, 4, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::R32Float, true, true, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RG16Uint, true, false, 4, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::RG16Sint, true, false, 4, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::RG16Float, true, false, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGBA8Unorm, true, true, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGBA8UnormSrgb, true, false, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGBA8Snorm, false, true, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGBA8Uint, true, true, 4, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::RGBA8Sint, true, true, 4, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::BGRA8Unorm, true, false, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::BGRA8UnormSrgb, true, false, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGB10A2Unorm, true, false, 4, Type::Float);
+
+ AddColorFormat(wgpu::TextureFormat::RG11B10Float, false, false, 4, Type::Float);
// 8 bytes color formats
- AddColorFormat(wgpu::TextureFormat::RG32Uint, true, 8, Type::Uint);
- AddColorFormat(wgpu::TextureFormat::RG32Sint, true, 8, Type::Sint);
- AddColorFormat(wgpu::TextureFormat::RG32Float, true, 8, Type::Float);
- AddColorFormat(wgpu::TextureFormat::RGBA16Uint, true, 8, Type::Uint);
- AddColorFormat(wgpu::TextureFormat::RGBA16Sint, true, 8, Type::Sint);
- AddColorFormat(wgpu::TextureFormat::RGBA16Float, true, 8, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RG32Uint, true, true, 8, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::RG32Sint, true, true, 8, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::RG32Float, true, true, 8, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGBA16Uint, true, true, 8, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::RGBA16Sint, true, true, 8, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::RGBA16Float, true, true, 8, Type::Float);
// 16 bytes color formats
- AddColorFormat(wgpu::TextureFormat::RGBA32Uint, true, 16, Type::Uint);
- AddColorFormat(wgpu::TextureFormat::RGBA32Sint, true, 16, Type::Sint);
- AddColorFormat(wgpu::TextureFormat::RGBA32Float, true, 16, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGBA32Uint, true, true, 16, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::RGBA32Sint, true, true, 16, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::RGBA32Float, true, true, 16, Type::Float);
// Depth stencil formats
AddDepthStencilFormat(wgpu::TextureFormat::Depth32Float, Aspect::Depth, 4);
diff --git a/chromium/third_party/dawn/src/dawn_native/Format.h b/chromium/third_party/dawn/src/dawn_native/Format.h
index f1e4fbb4874..82b40d81cce 100644
--- a/chromium/third_party/dawn/src/dawn_native/Format.h
+++ b/chromium/third_party/dawn/src/dawn_native/Format.h
@@ -50,6 +50,7 @@ namespace dawn_native {
bool isCompressed;
// A format can be known but not supported because it is part of a disabled extension.
bool isSupported;
+ bool supportsStorageUsage;
Aspect aspect;
Type type;
@@ -64,7 +65,7 @@ namespace dawn_native {
bool HasDepth() const;
bool HasStencil() const;
bool HasDepthOrStencil() const;
- bool HasComponentType(wgpu::TextureComponentType componentType) const;
+ bool HasComponentType(Type componentType) const;
// The index of the format in the list of all known formats: a unique number for each format
// in [0, kKnownFormatCount)
diff --git a/chromium/third_party/dawn/src/dawn_native/Forward.h b/chromium/third_party/dawn/src/dawn_native/Forward.h
index 948fbfd8ae7..05538e85eed 100644
--- a/chromium/third_party/dawn/src/dawn_native/Forward.h
+++ b/chromium/third_party/dawn/src/dawn_native/Forward.h
@@ -38,9 +38,11 @@ namespace dawn_native {
class RenderPipelineBase;
class ResourceHeapBase;
class SamplerBase;
+ class Surface;
class ShaderModuleBase;
class StagingBufferBase;
class SwapChainBase;
+ class NewSwapChainBase;
class TextureBase;
class TextureViewBase;
diff --git a/chromium/third_party/dawn/src/dawn_native/Instance.cpp b/chromium/third_party/dawn/src/dawn_native/Instance.cpp
index 3fc7ae889bd..982c9797281 100644
--- a/chromium/third_party/dawn/src/dawn_native/Instance.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Instance.cpp
@@ -15,9 +15,9 @@
#include "dawn_native/Instance.h"
#include "common/Assert.h"
+#include "common/Log.h"
#include "dawn_native/ErrorData.h"
-
-#include <iostream>
+#include "dawn_native/Surface.h"
namespace dawn_native {
@@ -51,6 +51,19 @@ namespace dawn_native {
// InstanceBase
+ // static
+ InstanceBase* InstanceBase::Create(const InstanceDescriptor* descriptor) {
+ Ref<InstanceBase> instance = AcquireRef(new InstanceBase);
+ if (!instance->Initialize(descriptor)) {
+ return nullptr;
+ }
+ return instance.Detach();
+ }
+
+ bool InstanceBase::Initialize(const InstanceDescriptor*) {
+ return true;
+ }
+
void InstanceBase::DiscoverDefaultAdapters() {
EnsureBackendConnections();
@@ -108,7 +121,7 @@ namespace dawn_native {
return;
}
- auto Register = [this](BackendConnection* connection, BackendType expectedType) {
+ auto Register = [this](BackendConnection* connection, wgpu::BackendType expectedType) {
if (connection != nullptr) {
ASSERT(connection->GetType() == expectedType);
ASSERT(connection->GetInstance() == this);
@@ -117,25 +130,25 @@ namespace dawn_native {
};
#if defined(DAWN_ENABLE_BACKEND_D3D12)
- Register(d3d12::Connect(this), BackendType::D3D12);
+ Register(d3d12::Connect(this), wgpu::BackendType::D3D12);
#endif // defined(DAWN_ENABLE_BACKEND_D3D12)
#if defined(DAWN_ENABLE_BACKEND_METAL)
- Register(metal::Connect(this), BackendType::Metal);
+ Register(metal::Connect(this), wgpu::BackendType::Metal);
#endif // defined(DAWN_ENABLE_BACKEND_METAL)
#if defined(DAWN_ENABLE_BACKEND_VULKAN)
- Register(vulkan::Connect(this), BackendType::Vulkan);
+ Register(vulkan::Connect(this), wgpu::BackendType::Vulkan);
#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
#if defined(DAWN_ENABLE_BACKEND_OPENGL)
- Register(opengl::Connect(this), BackendType::OpenGL);
+ Register(opengl::Connect(this), wgpu::BackendType::OpenGL);
#endif // defined(DAWN_ENABLE_BACKEND_OPENGL)
#if defined(DAWN_ENABLE_BACKEND_NULL)
- Register(null::Connect(this), BackendType::Null);
+ Register(null::Connect(this), wgpu::BackendType::Null);
#endif // defined(DAWN_ENABLE_BACKEND_NULL)
mBackendsConnected = true;
}
- ResultOrError<BackendConnection*> InstanceBase::FindBackend(BackendType type) {
+ ResultOrError<BackendConnection*> InstanceBase::FindBackend(wgpu::BackendType type) {
for (std::unique_ptr<BackendConnection>& backend : mBackends) {
if (backend->GetType() == type) {
return backend.get();
@@ -149,7 +162,7 @@ namespace dawn_native {
EnsureBackendConnections();
BackendConnection* backend;
- DAWN_TRY_ASSIGN(backend, FindBackend(options->backendType));
+ DAWN_TRY_ASSIGN(backend, FindBackend(static_cast<wgpu::BackendType>(options->backendType)));
std::vector<std::unique_ptr<AdapterBase>> newAdapters;
DAWN_TRY_ASSIGN(newAdapters, backend->DiscoverAdapters(options));
@@ -165,11 +178,10 @@ namespace dawn_native {
bool InstanceBase::ConsumedError(MaybeError maybeError) {
if (maybeError.IsError()) {
- ErrorData* error = maybeError.AcquireError();
+ std::unique_ptr<ErrorData> error = maybeError.AcquireError();
ASSERT(error != nullptr);
- std::cout << error->GetMessage() << std::endl;
- delete error;
+ dawn::InfoLog() << error->GetMessage();
return true;
}
@@ -200,4 +212,12 @@ namespace dawn_native {
return mPlatform;
}
+ Surface* InstanceBase::CreateSurface(const SurfaceDescriptor* descriptor) {
+ if (ConsumedError(ValidateSurfaceDescriptor(this, descriptor))) {
+ return nullptr;
+ }
+
+ return new Surface(this, descriptor);
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Instance.h b/chromium/third_party/dawn/src/dawn_native/Instance.h
index 1e704e093e0..e07d04b6b48 100644
--- a/chromium/third_party/dawn/src/dawn_native/Instance.h
+++ b/chromium/third_party/dawn/src/dawn_native/Instance.h
@@ -18,7 +18,9 @@
#include "dawn_native/Adapter.h"
#include "dawn_native/BackendConnection.h"
#include "dawn_native/Extensions.h"
+#include "dawn_native/RefCounted.h"
#include "dawn_native/Toggles.h"
+#include "dawn_native/dawn_platform.h"
#include <array>
#include <memory>
@@ -27,15 +29,13 @@
namespace dawn_native {
+ class Surface;
+
// This is called InstanceBase for consistency across the frontend, even if the backends don't
// specialize this class.
- class InstanceBase final {
+ class InstanceBase final : public RefCounted {
public:
- InstanceBase() = default;
- ~InstanceBase() = default;
-
- InstanceBase(const InstanceBase& other) = delete;
- InstanceBase& operator=(const InstanceBase& other) = delete;
+ static InstanceBase* Create(const InstanceDescriptor* descriptor = nullptr);
void DiscoverDefaultAdapters();
bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);
@@ -66,12 +66,23 @@ namespace dawn_native {
void SetPlatform(dawn_platform::Platform* platform);
dawn_platform::Platform* GetPlatform() const;
+ // Dawn API
+ Surface* CreateSurface(const SurfaceDescriptor* descriptor);
+
private:
+ InstanceBase() = default;
+ ~InstanceBase() = default;
+
+ InstanceBase(const InstanceBase& other) = delete;
+ InstanceBase& operator=(const InstanceBase& other) = delete;
+
+ bool Initialize(const InstanceDescriptor* descriptor);
+
// Lazily creates connections to all backends that have been compiled.
void EnsureBackendConnections();
// Finds the BackendConnection for `type` or returns an error.
- ResultOrError<BackendConnection*> FindBackend(BackendType type);
+ ResultOrError<BackendConnection*> FindBackend(wgpu::BackendType type);
MaybeError DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options);
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
index ad819d1f0bf..4a0b6530227 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
@@ -66,21 +66,22 @@ namespace dawn_native {
return mLayout.Get();
}
- MaybeError PipelineBase::ValidateGetBindGroupLayout(uint32_t group) {
+ MaybeError PipelineBase::ValidateGetBindGroupLayout(uint32_t groupIndex) {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
DAWN_TRY(GetDevice()->ValidateObject(mLayout.Get()));
- if (group >= kMaxBindGroups) {
+ if (groupIndex >= kMaxBindGroups) {
return DAWN_VALIDATION_ERROR("Bind group layout index out of bounds");
}
return {};
}
- BindGroupLayoutBase* PipelineBase::GetBindGroupLayout(uint32_t group) {
- if (GetDevice()->ConsumedError(ValidateGetBindGroupLayout(group))) {
+ BindGroupLayoutBase* PipelineBase::GetBindGroupLayout(uint32_t groupIndex) {
+ if (GetDevice()->ConsumedError(ValidateGetBindGroupLayout(groupIndex))) {
return BindGroupLayoutBase::MakeError(GetDevice());
}
- if (!mLayout->GetBindGroupLayoutsMask()[group]) {
+ if (!mLayout->GetBindGroupLayoutsMask()[groupIndex]) {
// Get or create an empty bind group layout.
// TODO(enga): Consider caching this object on the Device and reusing it.
// Today, this can't be done correctly because of the order of Device destruction.
@@ -98,7 +99,7 @@ namespace dawn_native {
return bgl;
}
- BindGroupLayoutBase* bgl = mLayout->GetBindGroupLayout(group);
+ BindGroupLayoutBase* bgl = mLayout->GetBindGroupLayout(groupIndex);
bgl->Reference();
return bgl;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.h b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
index 29c838602ba..d248e49d889 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.h
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
@@ -38,7 +38,7 @@ namespace dawn_native {
wgpu::ShaderStage GetStageMask() const;
PipelineLayoutBase* GetLayout();
const PipelineLayoutBase* GetLayout() const;
- BindGroupLayoutBase* GetBindGroupLayout(uint32_t group);
+ BindGroupLayoutBase* GetBindGroupLayout(uint32_t groupIndex);
protected:
PipelineBase(DeviceBase* device, PipelineLayoutBase* layout, wgpu::ShaderStage stages);
diff --git a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
index b76e553377a..46a132b365c 100644
--- a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
@@ -33,6 +33,31 @@ namespace dawn_native {
lhs.textureComponentType == rhs.textureComponentType;
}
+ wgpu::ShaderStage GetShaderStageVisibilityWithBindingType(wgpu::BindingType bindingType) {
+ // TODO(jiawei.shao@intel.com): support read-only and read-write storage textures.
+ switch (bindingType) {
+ case wgpu::BindingType::StorageBuffer:
+ return wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute;
+
+ case wgpu::BindingType::WriteonlyStorageTexture:
+ return wgpu::ShaderStage::Compute;
+
+ case wgpu::BindingType::StorageTexture:
+ UNREACHABLE();
+ return wgpu::ShaderStage::None;
+
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::SampledTexture:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ return wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment |
+ wgpu::ShaderStage::Compute;
+ }
+
+ return {};
+ }
+
} // anonymous namespace
MaybeError ValidatePipelineLayoutDescriptor(DeviceBase* device,
@@ -105,11 +130,8 @@ namespace dawn_native {
std::array<std::array<BindGroupLayoutBinding, kMaxBindingsPerGroup>, kMaxBindGroups>
bindingData = {};
- // Bitsets of used bindings
- std::array<std::bitset<kMaxBindingsPerGroup>, kMaxBindGroups> usedBindings = {};
-
- // A flat map of bindings to the index in |bindingData|
- std::array<std::array<uint32_t, kMaxBindingsPerGroup>, kMaxBindGroups> usedBindingsMap = {};
+ // A map of bindings to the index in |bindingData|
+ std::array<std::map<BindingNumber, BindingIndex>, kMaxBindGroups> usedBindingsMap = {};
// A counter of how many bindings we've populated in |bindingData|
std::array<uint32_t, kMaxBindGroups> bindingCounts = {};
@@ -120,44 +142,51 @@ namespace dawn_native {
const ShaderModuleBase::ModuleBindingInfo& info = module->GetBindingInfo();
for (uint32_t group = 0; group < info.size(); ++group) {
- for (uint32_t binding = 0; binding < info[group].size(); ++binding) {
- const ShaderModuleBase::BindingInfo& bindingInfo = info[group][binding];
- if (!bindingInfo.used) {
- continue;
- }
+ for (const auto& it : info[group]) {
+ BindingNumber bindingNumber = it.first;
+ const ShaderModuleBase::ShaderBindingInfo& bindingInfo = it.second;
if (bindingInfo.multisampled) {
return DAWN_VALIDATION_ERROR("Multisampled textures not supported (yet)");
}
BindGroupLayoutBinding bindingSlot;
- bindingSlot.binding = binding;
- bindingSlot.visibility = wgpu::ShaderStage::Vertex |
- wgpu::ShaderStage::Fragment |
- wgpu::ShaderStage::Compute;
+ bindingSlot.binding = bindingNumber;
+
+ DAWN_TRY(ValidateBindingTypeWithShaderStageVisibility(
+ bindingInfo.type, StageBit(module->GetExecutionModel())));
+ DAWN_TRY(ValidateStorageTextureFormat(device, bindingInfo.type,
+ bindingInfo.storageTextureFormat));
+
+ bindingSlot.visibility =
+ GetShaderStageVisibilityWithBindingType(bindingInfo.type);
+
bindingSlot.type = bindingInfo.type;
bindingSlot.hasDynamicOffset = false;
bindingSlot.multisampled = bindingInfo.multisampled;
bindingSlot.textureDimension = bindingInfo.textureDimension;
bindingSlot.textureComponentType =
Format::FormatTypeToTextureComponentType(bindingInfo.textureComponentType);
-
- if (usedBindings[group][binding]) {
- if (bindingSlot == bindingData[group][usedBindingsMap[group][binding]]) {
- // Already used and the data is the same. Continue.
- continue;
- } else {
- return DAWN_VALIDATION_ERROR(
- "Duplicate binding in default pipeline layout initialization not "
- "compatible with previous declaration");
+ bindingSlot.storageTextureFormat = bindingInfo.storageTextureFormat;
+
+ {
+ const auto& it = usedBindingsMap[group].find(bindingNumber);
+ if (it != usedBindingsMap[group].end()) {
+ if (bindingSlot == bindingData[group][it->second]) {
+ // Already used and the data is the same. Continue.
+ continue;
+ } else {
+ return DAWN_VALIDATION_ERROR(
+ "Duplicate binding in default pipeline layout initialization "
+ "not compatible with previous declaration");
+ }
}
}
uint32_t currentBindingCount = bindingCounts[group];
bindingData[group][currentBindingCount] = bindingSlot;
- usedBindingsMap[group][binding] = currentBindingCount;
- usedBindings[group].set(binding);
+ usedBindingsMap[group][bindingNumber] = currentBindingCount;
bindingCounts[group]++;
diff --git a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
index bedf0c44176..5562dcf17f4 100644
--- a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
@@ -29,36 +29,42 @@ namespace dawn_native {
namespace {
void TrackBindGroupResourceUsage(PassResourceUsageTracker* usageTracker,
BindGroupBase* group) {
- const auto& layoutInfo = group->GetLayout()->GetBindingInfo();
-
- for (uint32_t i : IterateBitSet(layoutInfo.mask)) {
- wgpu::BindingType type = layoutInfo.types[i];
+ for (BindingIndex bindingIndex = 0;
+ bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
+ wgpu::BindingType type = group->GetLayout()->GetBindingInfo(bindingIndex).type;
switch (type) {
case wgpu::BindingType::UniformBuffer: {
- BufferBase* buffer = group->GetBindingAsBufferBinding(i).buffer;
+ BufferBase* buffer = group->GetBindingAsBufferBinding(bindingIndex).buffer;
usageTracker->BufferUsedAs(buffer, wgpu::BufferUsage::Uniform);
- } break;
+ break;
+ }
case wgpu::BindingType::StorageBuffer: {
- BufferBase* buffer = group->GetBindingAsBufferBinding(i).buffer;
+ BufferBase* buffer = group->GetBindingAsBufferBinding(bindingIndex).buffer;
usageTracker->BufferUsedAs(buffer, wgpu::BufferUsage::Storage);
- } break;
+ break;
+ }
case wgpu::BindingType::SampledTexture: {
- TextureBase* texture = group->GetBindingAsTextureView(i)->GetTexture();
+ TextureBase* texture =
+ group->GetBindingAsTextureView(bindingIndex)->GetTexture();
usageTracker->TextureUsedAs(texture, wgpu::TextureUsage::Sampled);
- } break;
+ break;
+ }
case wgpu::BindingType::ReadonlyStorageBuffer: {
- BufferBase* buffer = group->GetBindingAsBufferBinding(i).buffer;
+ BufferBase* buffer = group->GetBindingAsBufferBinding(bindingIndex).buffer;
usageTracker->BufferUsedAs(buffer, kReadOnlyStorage);
- } break;
+ break;
+ }
case wgpu::BindingType::Sampler:
break;
case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
UNREACHABLE();
break;
}
@@ -129,7 +135,22 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("dynamicOffset count mismatch");
}
- for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
+ for (BindingIndex i = 0; i < dynamicOffsetCount; ++i) {
+ const BindingInfo& bindingInfo = layout->GetBindingInfo(i);
+
+ // BGL creation sorts bindings such that the dynamic buffer bindings are first.
+ // ASSERT that this true.
+ ASSERT(bindingInfo.hasDynamicOffset);
+ switch (bindingInfo.type) {
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
if (dynamicOffsets[i] % kMinDynamicBufferOffsetAlignment != 0) {
return DAWN_VALIDATION_ERROR("Dynamic Buffer Offset need to be aligned");
}
@@ -138,9 +159,9 @@ namespace dawn_native {
// During BindGroup creation, validation ensures binding offset + binding size
// <= buffer size.
- DAWN_ASSERT(bufferBinding.buffer->GetSize() >= bufferBinding.size);
- DAWN_ASSERT(bufferBinding.buffer->GetSize() - bufferBinding.size >=
- bufferBinding.offset);
+ ASSERT(bufferBinding.buffer->GetSize() >= bufferBinding.size);
+ ASSERT(bufferBinding.buffer->GetSize() - bufferBinding.size >=
+ bufferBinding.offset);
if ((dynamicOffsets[i] > bufferBinding.buffer->GetSize() -
bufferBinding.offset - bufferBinding.size)) {
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.cpp b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
index 0fbcdc7d052..1876f9bce43 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
@@ -32,8 +32,26 @@ namespace dawn_native {
QueueBase::QueueBase(DeviceBase* device) : ObjectBase(device) {
}
+ QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag) : ObjectBase(device, tag) {
+ }
+
+ // static
+ QueueBase* QueueBase::MakeError(DeviceBase* device) {
+ return new QueueBase(device, ObjectBase::kError);
+ }
+
+ MaybeError QueueBase::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+ UNREACHABLE();
+ return {};
+ }
+
void QueueBase::Submit(uint32_t commandCount, CommandBufferBase* const* commands) {
DeviceBase* device = GetDevice();
+ if (device->ConsumedError(device->ValidateIsAlive())) {
+ // If device is lost, don't let any commands be submitted
+ return;
+ }
+
TRACE_EVENT0(device->GetPlatform(), General, "Queue::Submit");
if (device->IsValidationEnabled() &&
device->ConsumedError(ValidateSubmit(commandCount, commands))) {
@@ -100,6 +118,7 @@ namespace dawn_native {
}
MaybeError QueueBase::ValidateSignal(const Fence* fence, uint64_t signalValue) {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
DAWN_TRY(GetDevice()->ValidateObject(fence));
@@ -114,6 +133,7 @@ namespace dawn_native {
}
MaybeError QueueBase::ValidateCreateFence(const FenceDescriptor* descriptor) {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
DAWN_TRY(ValidateFenceDescriptor(descriptor));
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.h b/chromium/third_party/dawn/src/dawn_native/Queue.h
index 4adec04e11f..fd9d291f00c 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.h
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.h
@@ -27,14 +27,17 @@ namespace dawn_native {
public:
QueueBase(DeviceBase* device);
+ static QueueBase* MakeError(DeviceBase* device);
+
// Dawn API
void Submit(uint32_t commandCount, CommandBufferBase* const* commands);
void Signal(Fence* fence, uint64_t signalValue);
Fence* CreateFence(const FenceDescriptor* descriptor);
private:
- virtual MaybeError SubmitImpl(uint32_t commandCount,
- CommandBufferBase* const* commands) = 0;
+ QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+ virtual MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands);
MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands);
MaybeError ValidateSignal(const Fence* fence, uint64_t signalValue);
diff --git a/chromium/third_party/dawn/src/dawn_native/RefCounted.cpp b/chromium/third_party/dawn/src/dawn_native/RefCounted.cpp
index 6782c144153..a0e50b8b75a 100644
--- a/chromium/third_party/dawn/src/dawn_native/RefCounted.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RefCounted.cpp
@@ -16,6 +16,8 @@
#include "common/Assert.h"
+#include <cstddef>
+
namespace dawn_native {
static constexpr size_t kPayloadBits = 1;
@@ -43,14 +45,36 @@ namespace dawn_native {
void RefCounted::Reference() {
ASSERT((mRefCount & ~kPayloadMask) != 0);
- mRefCount += kRefCountIncrement;
+
+ // The relaxed ordering guarantees only the atomicity of the update, which is enough here
+ // because the reference we are copying from still exists and makes sure other threads
+ // don't delete `this`.
+ // See the explanation in the Boost documentation:
+ // https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+ mRefCount.fetch_add(kRefCountIncrement, std::memory_order_relaxed);
}
void RefCounted::Release() {
ASSERT((mRefCount & ~kPayloadMask) != 0);
- mRefCount -= kRefCountIncrement;
- if (mRefCount < kRefCountIncrement) {
+ // The release fence here is to make sure all accesses to the object on a thread A
+ // happen-before the object is deleted on a thread B. The release memory order ensures that
+ // all accesses on thread A happen-before the refcount is decreased and the atomic variable
+ // makes sure the refcount decrease in A happens-before the refcount decrease in B. Finally
+ // the acquire fence in the destruction case makes sure the refcount decrease in B
+ // happens-before the `delete this`.
+ //
+ // See the explanation in the Boost documentation:
+ // https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+ uint64_t previousRefCount =
+ mRefCount.fetch_sub(kRefCountIncrement, std::memory_order_release);
+
+ // Check that the previous reference count was strictly less than 2, ignoring payload bits.
+ if (previousRefCount < 2 * kRefCountIncrement) {
+ // Note that on ARM64 this will generate a `dmb ish` instruction which is a global
+ // memory barrier, when an acquire load on mRefCount (using the `ldar` instruction)
+ // should be enough and could end up being faster.
+ std::atomic_thread_fence(std::memory_order_acquire);
delete this;
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
index 885f7a11803..532c448b591 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
@@ -27,13 +27,17 @@
namespace dawn_native {
RenderEncoderBase::RenderEncoderBase(DeviceBase* device, EncodingContext* encodingContext)
- : ProgrammablePassEncoder(device, encodingContext) {
+ : ProgrammablePassEncoder(device, encodingContext),
+ mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
+ mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
}
RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
EncodingContext* encodingContext,
ErrorTag errorTag)
- : ProgrammablePassEncoder(device, encodingContext, errorTag) {
+ : ProgrammablePassEncoder(device, encodingContext, errorTag),
+ mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
+ mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
}
void RenderEncoderBase::Draw(uint32_t vertexCount,
@@ -41,6 +45,10 @@ namespace dawn_native {
uint32_t firstVertex,
uint32_t firstInstance) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ if (mDisableBaseInstance && firstInstance != 0) {
+ return DAWN_VALIDATION_ERROR("Non-zero first instance not supported");
+ }
+
DrawCmd* draw = allocator->Allocate<DrawCmd>(Command::Draw);
draw->vertexCount = vertexCount;
draw->instanceCount = instanceCount;
@@ -57,6 +65,13 @@ namespace dawn_native {
int32_t baseVertex,
uint32_t firstInstance) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ if (mDisableBaseInstance && firstInstance != 0) {
+ return DAWN_VALIDATION_ERROR("Non-zero first instance not supported");
+ }
+ if (mDisableBaseInstance && baseVertex != 0) {
+ return DAWN_VALIDATION_ERROR("Non-zero base vertex not supported");
+ }
+
DrawIndexedCmd* draw = allocator->Allocate<DrawIndexedCmd>(Command::DrawIndexed);
draw->indexCount = indexCount;
draw->instanceCount = instanceCount;
@@ -139,6 +154,10 @@ namespace dawn_native {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
DAWN_TRY(GetDevice()->ValidateObject(buffer));
+ if (slot >= kMaxVertexBuffers) {
+ return DAWN_VALIDATION_ERROR("Vertex buffer slot out of bounds");
+ }
+
SetVertexBufferCmd* cmd =
allocator->Allocate<SetVertexBufferCmd>(Command::SetVertexBuffer);
cmd->slot = slot;
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
index 906c9e09af7..33343ae4a0c 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.h
@@ -45,6 +45,10 @@ namespace dawn_native {
protected:
// Construct an "error" render encoder base.
RenderEncoderBase(DeviceBase* device, EncodingContext* encodingContext, ErrorTag errorTag);
+
+ private:
+ const bool mDisableBaseVertex;
+ const bool mDisableBaseInstance;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
index be870403875..dc4a508c29b 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
@@ -20,6 +20,8 @@
#include "dawn_native/Device.h"
#include "dawn_native/ValidationUtils_autogen.h"
+#include <cmath>
+
namespace dawn_native {
// Helper functions
namespace {
@@ -115,8 +117,15 @@ namespace dawn_native {
if (descriptor->nextInChain != nullptr) {
return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
}
+
DAWN_TRY(ValidateFrontFace(descriptor->frontFace));
DAWN_TRY(ValidateCullMode(descriptor->cullMode));
+
+ if (std::isnan(descriptor->depthBiasSlopeScale) ||
+ std::isnan(descriptor->depthBiasClamp)) {
+ return DAWN_VALIDATION_ERROR("Depth bias parameters must not be NaN.");
+ }
+
return {};
}
@@ -709,6 +718,12 @@ namespace dawn_native {
if (descA.frontFace != descB.frontFace || descA.cullMode != descB.cullMode) {
return false;
}
+
+ ASSERT(!std::isnan(descA.depthBiasSlopeScale));
+ ASSERT(!std::isnan(descB.depthBiasSlopeScale));
+ ASSERT(!std::isnan(descA.depthBiasClamp));
+ ASSERT(!std::isnan(descB.depthBiasClamp));
+
if (descA.depthBias != descB.depthBias ||
descA.depthBiasSlopeScale != descB.depthBiasSlopeScale ||
descA.depthBiasClamp != descB.depthBiasClamp) {
diff --git a/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h b/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h
index a66129ac649..1eb68e75526 100644
--- a/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h
+++ b/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h
@@ -31,6 +31,9 @@ namespace dawn_native {
// Memory sub-divided using one or more blocks of various sizes.
kSubAllocated,
+ // Memory was allocated outside of Dawn.
+ kExternal,
+
// Memory not allocated or freed.
kInvalid
};
diff --git a/chromium/third_party/dawn/src/dawn_native/Sampler.cpp b/chromium/third_party/dawn/src/dawn_native/Sampler.cpp
index 230da340e02..034a8c3eabf 100644
--- a/chromium/third_party/dawn/src/dawn_native/Sampler.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Sampler.cpp
@@ -27,12 +27,12 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
}
- if (!std::isfinite(descriptor->lodMinClamp) || !std::isfinite(descriptor->lodMaxClamp)) {
- return DAWN_VALIDATION_ERROR("LOD must be finite");
+ if (std::isnan(descriptor->lodMinClamp) || std::isnan(descriptor->lodMaxClamp)) {
+ return DAWN_VALIDATION_ERROR("LOD clamp bounds must not be NaN");
}
if (descriptor->lodMinClamp < 0 || descriptor->lodMaxClamp < 0) {
- return DAWN_VALIDATION_ERROR("LOD must be positive");
+ return DAWN_VALIDATION_ERROR("LOD clamp bounds must be positive");
}
if (descriptor->lodMinClamp > descriptor->lodMaxClamp) {
@@ -101,10 +101,10 @@ namespace dawn_native {
return true;
}
- ASSERT(std::isfinite(a->mLodMinClamp));
- ASSERT(std::isfinite(b->mLodMinClamp));
- ASSERT(std::isfinite(a->mLodMaxClamp));
- ASSERT(std::isfinite(b->mLodMaxClamp));
+ ASSERT(!std::isnan(a->mLodMinClamp));
+ ASSERT(!std::isnan(b->mLodMinClamp));
+ ASSERT(!std::isnan(a->mLodMaxClamp));
+ ASSERT(!std::isnan(b->mLodMaxClamp));
return a->mAddressModeU == b->mAddressModeU && a->mAddressModeV == b->mAddressModeV &&
a->mAddressModeW == b->mAddressModeW && a->mMagFilter == b->mMagFilter &&
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
index dfa9acf4a92..0cd96921235 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
@@ -66,6 +66,217 @@ namespace dawn_native {
}
}
+ wgpu::TextureViewDimension ToWGPUTextureViewDimension(
+ shaderc_spvc_texture_view_dimension dim) {
+ switch (dim) {
+ case shaderc_spvc_texture_view_dimension_undefined:
+ return wgpu::TextureViewDimension::Undefined;
+ case shaderc_spvc_texture_view_dimension_e1D:
+ return wgpu::TextureViewDimension::e1D;
+ case shaderc_spvc_texture_view_dimension_e2D:
+ return wgpu::TextureViewDimension::e2D;
+ case shaderc_spvc_texture_view_dimension_e2D_array:
+ return wgpu::TextureViewDimension::e2DArray;
+ case shaderc_spvc_texture_view_dimension_cube:
+ return wgpu::TextureViewDimension::Cube;
+ case shaderc_spvc_texture_view_dimension_cube_array:
+ return wgpu::TextureViewDimension::CubeArray;
+ case shaderc_spvc_texture_view_dimension_e3D:
+ return wgpu::TextureViewDimension::e3D;
+ }
+ UNREACHABLE();
+ }
+
+ Format::Type ToDawnFormatType(shaderc_spvc_texture_format_type type) {
+ switch (type) {
+ case shaderc_spvc_texture_format_type_float:
+ return Format::Type::Float;
+ case shaderc_spvc_texture_format_type_sint:
+ return Format::Type::Sint;
+ case shaderc_spvc_texture_format_type_uint:
+ return Format::Type::Uint;
+ case shaderc_spvc_texture_format_type_other:
+ return Format::Type::Other;
+ }
+ UNREACHABLE();
+ }
+
+ wgpu::BindingType ToWGPUBindingType(shaderc_spvc_binding_type type) {
+ switch (type) {
+ case shaderc_spvc_binding_type_uniform_buffer:
+ return wgpu::BindingType::UniformBuffer;
+ case shaderc_spvc_binding_type_storage_buffer:
+ return wgpu::BindingType::StorageBuffer;
+ case shaderc_spvc_binding_type_readonly_storage_buffer:
+ return wgpu::BindingType::ReadonlyStorageBuffer;
+ case shaderc_spvc_binding_type_sampler:
+ return wgpu::BindingType::Sampler;
+ case shaderc_spvc_binding_type_sampled_texture:
+ return wgpu::BindingType::SampledTexture;
+ case shaderc_spvc_binding_type_readonly_storage_texture:
+ return wgpu::BindingType::ReadonlyStorageTexture;
+ case shaderc_spvc_binding_type_writeonly_storage_texture:
+ return wgpu::BindingType::WriteonlyStorageTexture;
+ case shaderc_spvc_binding_type_storage_texture:
+ return wgpu::BindingType::StorageTexture;
+ }
+ UNREACHABLE();
+ }
+
+ SingleShaderStage ToSingleShaderStage(shaderc_spvc_execution_model execution_model) {
+ switch (execution_model) {
+ case shaderc_spvc_execution_model_vertex:
+ return SingleShaderStage::Vertex;
+ case shaderc_spvc_execution_model_fragment:
+ return SingleShaderStage::Fragment;
+ case shaderc_spvc_execution_model_glcompute:
+ return SingleShaderStage::Compute;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ wgpu::TextureFormat ToWGPUTextureFormat(spv::ImageFormat format) {
+ switch (format) {
+ case spv::ImageFormatR8:
+ return wgpu::TextureFormat::R8Unorm;
+ case spv::ImageFormatR8Snorm:
+ return wgpu::TextureFormat::R8Snorm;
+ case spv::ImageFormatR8ui:
+ return wgpu::TextureFormat::R8Uint;
+ case spv::ImageFormatR8i:
+ return wgpu::TextureFormat::R8Sint;
+ case spv::ImageFormatR16ui:
+ return wgpu::TextureFormat::R16Uint;
+ case spv::ImageFormatR16i:
+ return wgpu::TextureFormat::R16Sint;
+ case spv::ImageFormatR16f:
+ return wgpu::TextureFormat::R16Float;
+ case spv::ImageFormatRg8:
+ return wgpu::TextureFormat::RG8Unorm;
+ case spv::ImageFormatRg8Snorm:
+ return wgpu::TextureFormat::RG8Snorm;
+ case spv::ImageFormatRg8ui:
+ return wgpu::TextureFormat::RG8Uint;
+ case spv::ImageFormatRg8i:
+ return wgpu::TextureFormat::RG8Sint;
+ case spv::ImageFormatR32f:
+ return wgpu::TextureFormat::R32Float;
+ case spv::ImageFormatR32ui:
+ return wgpu::TextureFormat::R32Uint;
+ case spv::ImageFormatR32i:
+ return wgpu::TextureFormat::R32Sint;
+ case spv::ImageFormatRg16ui:
+ return wgpu::TextureFormat::RG16Uint;
+ case spv::ImageFormatRg16i:
+ return wgpu::TextureFormat::RG16Sint;
+ case spv::ImageFormatRg16f:
+ return wgpu::TextureFormat::RG16Float;
+ case spv::ImageFormatRgba8:
+ return wgpu::TextureFormat::RGBA8Unorm;
+ case spv::ImageFormatRgba8Snorm:
+ return wgpu::TextureFormat::RGBA8Snorm;
+ case spv::ImageFormatRgba8ui:
+ return wgpu::TextureFormat::RGBA8Uint;
+ case spv::ImageFormatRgba8i:
+ return wgpu::TextureFormat::RGBA8Sint;
+ case spv::ImageFormatRgb10A2:
+ return wgpu::TextureFormat::RGB10A2Unorm;
+ case spv::ImageFormatR11fG11fB10f:
+ return wgpu::TextureFormat::RG11B10Float;
+ case spv::ImageFormatRg32f:
+ return wgpu::TextureFormat::RG32Float;
+ case spv::ImageFormatRg32ui:
+ return wgpu::TextureFormat::RG32Uint;
+ case spv::ImageFormatRg32i:
+ return wgpu::TextureFormat::RG32Sint;
+ case spv::ImageFormatRgba16ui:
+ return wgpu::TextureFormat::RGBA16Uint;
+ case spv::ImageFormatRgba16i:
+ return wgpu::TextureFormat::RGBA16Sint;
+ case spv::ImageFormatRgba16f:
+ return wgpu::TextureFormat::RGBA16Float;
+ case spv::ImageFormatRgba32f:
+ return wgpu::TextureFormat::RGBA32Float;
+ case spv::ImageFormatRgba32ui:
+ return wgpu::TextureFormat::RGBA32Uint;
+ case spv::ImageFormatRgba32i:
+ return wgpu::TextureFormat::RGBA32Sint;
+ default:
+ return wgpu::TextureFormat::Undefined;
+ }
+ }
+
+ wgpu::TextureFormat ToWGPUTextureFormat(shaderc_spvc_storage_texture_format format) {
+ switch (format) {
+ case shaderc_spvc_storage_texture_format_r8unorm:
+ return wgpu::TextureFormat::R8Unorm;
+ case shaderc_spvc_storage_texture_format_r8snorm:
+ return wgpu::TextureFormat::R8Snorm;
+ case shaderc_spvc_storage_texture_format_r8uint:
+ return wgpu::TextureFormat::R8Uint;
+ case shaderc_spvc_storage_texture_format_r8sint:
+ return wgpu::TextureFormat::R8Sint;
+ case shaderc_spvc_storage_texture_format_r16uint:
+ return wgpu::TextureFormat::R16Uint;
+ case shaderc_spvc_storage_texture_format_r16sint:
+ return wgpu::TextureFormat::R16Sint;
+ case shaderc_spvc_storage_texture_format_r16float:
+ return wgpu::TextureFormat::R16Float;
+ case shaderc_spvc_storage_texture_format_rg8unorm:
+ return wgpu::TextureFormat::RG8Unorm;
+ case shaderc_spvc_storage_texture_format_rg8snorm:
+ return wgpu::TextureFormat::RG8Snorm;
+ case shaderc_spvc_storage_texture_format_rg8uint:
+ return wgpu::TextureFormat::RG8Uint;
+ case shaderc_spvc_storage_texture_format_rg8sint:
+ return wgpu::TextureFormat::RG8Sint;
+ case shaderc_spvc_storage_texture_format_r32float:
+ return wgpu::TextureFormat::R32Float;
+ case shaderc_spvc_storage_texture_format_r32uint:
+ return wgpu::TextureFormat::R32Uint;
+ case shaderc_spvc_storage_texture_format_r32sint:
+ return wgpu::TextureFormat::R32Sint;
+ case shaderc_spvc_storage_texture_format_rg16uint:
+ return wgpu::TextureFormat::RG16Uint;
+ case shaderc_spvc_storage_texture_format_rg16sint:
+ return wgpu::TextureFormat::RG16Sint;
+ case shaderc_spvc_storage_texture_format_rg16float:
+ return wgpu::TextureFormat::RG16Float;
+ case shaderc_spvc_storage_texture_format_rgba8unorm:
+ return wgpu::TextureFormat::RGBA8Unorm;
+ case shaderc_spvc_storage_texture_format_rgba8snorm:
+ return wgpu::TextureFormat::RGBA8Snorm;
+ case shaderc_spvc_storage_texture_format_rgba8uint:
+ return wgpu::TextureFormat::RGBA8Uint;
+ case shaderc_spvc_storage_texture_format_rgba8sint:
+ return wgpu::TextureFormat::RGBA8Sint;
+ case shaderc_spvc_storage_texture_format_rgb10a2unorm:
+ return wgpu::TextureFormat::RGB10A2Unorm;
+ case shaderc_spvc_storage_texture_format_rg11b10float:
+ return wgpu::TextureFormat::RG11B10Float;
+ case shaderc_spvc_storage_texture_format_rg32float:
+ return wgpu::TextureFormat::RG32Float;
+ case shaderc_spvc_storage_texture_format_rg32uint:
+ return wgpu::TextureFormat::RG32Uint;
+ case shaderc_spvc_storage_texture_format_rg32sint:
+ return wgpu::TextureFormat::RG32Sint;
+ case shaderc_spvc_storage_texture_format_rgba16uint:
+ return wgpu::TextureFormat::RGBA16Uint;
+ case shaderc_spvc_storage_texture_format_rgba16sint:
+ return wgpu::TextureFormat::RGBA16Sint;
+ case shaderc_spvc_storage_texture_format_rgba16float:
+ return wgpu::TextureFormat::RGBA16Float;
+ case shaderc_spvc_storage_texture_format_rgba32float:
+ return wgpu::TextureFormat::RGBA32Float;
+ case shaderc_spvc_storage_texture_format_rgba32uint:
+ return wgpu::TextureFormat::RGBA32Uint;
+ case shaderc_spvc_storage_texture_format_rgba32sint:
+ return wgpu::TextureFormat::RGBA32Sint;
+ default:
+ return wgpu::TextureFormat::Undefined;
+ }
+ }
} // anonymous namespace
MaybeError ValidateShaderModuleDescriptor(DeviceBase*,
@@ -105,13 +316,16 @@ namespace dawn_native {
}
return {};
- }
+ } // namespace
// ShaderModuleBase
ShaderModuleBase::ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor)
: CachedObject(device), mCode(descriptor->code, descriptor->code + descriptor->codeSize) {
mFragmentOutputFormatBaseTypes.fill(Format::Other);
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvcParser)) {
+ mSpvcContext.SetUseSpvcParser(true);
+ }
}
ShaderModuleBase::ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag)
@@ -129,10 +343,177 @@ namespace dawn_native {
return new ShaderModuleBase(device, ObjectBase::kError);
}
- void ShaderModuleBase::ExtractSpirvInfo(const spirv_cross::Compiler& compiler) {
+ MaybeError ShaderModuleBase::ExtractSpirvInfo(const spirv_cross::Compiler& compiler) {
ASSERT(!IsError());
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ DAWN_TRY(ExtractSpirvInfoWithSpvc());
+ } else {
+ DAWN_TRY(ExtractSpirvInfoWithSpirvCross(compiler));
+ }
+ return {};
+ }
- DeviceBase* device = GetDevice();
+ MaybeError ShaderModuleBase::ExtractSpirvInfoWithSpvc() {
+ shaderc_spvc_execution_model execution_model;
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.GetExecutionModel(&execution_model),
+ "Unable to get execution model for shader."));
+ mExecutionModel = ToSingleShaderStage(execution_model);
+
+ size_t push_constant_buffers_count;
+ DAWN_TRY(
+ CheckSpvcSuccess(mSpvcContext.GetPushConstantBufferCount(&push_constant_buffers_count),
+ "Unable to get push constant buffer count for shader."));
+
+ // TODO(rharrison): This should be handled by spirv-val pass in spvc,
+ // but need to confirm.
+ if (push_constant_buffers_count > 0) {
+ return DAWN_VALIDATION_ERROR("Push constants aren't supported.");
+ }
+
+ // Fill in bindingInfo with the SPIRV bindings
+ auto ExtractResourcesBinding =
+ [this](std::vector<shaderc_spvc_binding_info> bindings) -> MaybeError {
+ for (const auto& binding : bindings) {
+ if (binding.set >= kMaxBindGroups) {
+ return DAWN_VALIDATION_ERROR("Bind group index over limits in the SPIRV");
+ }
+
+ const auto& it = mBindingInfo[binding.set].emplace(BindingNumber(binding.binding),
+ ShaderBindingInfo{});
+ if (!it.second) {
+ return DAWN_VALIDATION_ERROR("Shader has duplicate bindings");
+ }
+
+ ShaderBindingInfo* info = &it.first->second;
+ info->id = binding.id;
+ info->base_type_id = binding.base_type_id;
+ info->type = ToWGPUBindingType(binding.binding_type);
+
+ switch (info->type) {
+ case wgpu::BindingType::SampledTexture: {
+ info->multisampled = binding.multisampled;
+ info->textureDimension =
+ ToWGPUTextureViewDimension(binding.texture_dimension);
+ info->textureComponentType =
+ ToDawnFormatType(binding.texture_component_type);
+ break;
+ }
+ case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture: {
+ wgpu::TextureFormat storageTextureFormat =
+ ToWGPUTextureFormat(binding.storage_texture_format);
+ if (storageTextureFormat == wgpu::TextureFormat::Undefined) {
+ return DAWN_VALIDATION_ERROR(
+ "Invalid image format declaration on storage image");
+ }
+ const Format& format =
+ GetDevice()->GetValidInternalFormat(storageTextureFormat);
+ if (!format.supportsStorageUsage) {
+ return DAWN_VALIDATION_ERROR(
+ "The storage texture format is not supported");
+ }
+ info->multisampled = binding.multisampled;
+ info->storageTextureFormat = storageTextureFormat;
+ info->textureDimension =
+ ToWGPUTextureViewDimension(binding.texture_dimension);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ return {};
+ };
+
+ std::vector<shaderc_spvc_binding_info> resource_bindings;
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.GetBindingInfo(
+ shaderc_spvc_shader_resource_uniform_buffers,
+ shaderc_spvc_binding_type_uniform_buffer, &resource_bindings),
+ "Unable to get binding info for uniform buffers from shader"));
+ DAWN_TRY(ExtractResourcesBinding(resource_bindings));
+
+ DAWN_TRY(CheckSpvcSuccess(
+ mSpvcContext.GetBindingInfo(shaderc_spvc_shader_resource_separate_images,
+ shaderc_spvc_binding_type_sampled_texture,
+ &resource_bindings),
+ "Unable to get binding info for sampled textures from shader"));
+ DAWN_TRY(ExtractResourcesBinding(resource_bindings));
+
+ DAWN_TRY(CheckSpvcSuccess(
+ mSpvcContext.GetBindingInfo(shaderc_spvc_shader_resource_separate_samplers,
+ shaderc_spvc_binding_type_sampler, &resource_bindings),
+ "Unable to get binding info for samples from shader"));
+ DAWN_TRY(ExtractResourcesBinding(resource_bindings));
+
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.GetBindingInfo(
+ shaderc_spvc_shader_resource_storage_buffers,
+ shaderc_spvc_binding_type_storage_buffer, &resource_bindings),
+ "Unable to get binding info for storage buffers from shader"));
+ DAWN_TRY(ExtractResourcesBinding(resource_bindings));
+
+ DAWN_TRY(CheckSpvcSuccess(
+ mSpvcContext.GetBindingInfo(shaderc_spvc_shader_resource_storage_images,
+ shaderc_spvc_binding_type_storage_texture,
+ &resource_bindings),
+ "Unable to get binding info for storage textures from shader"));
+ DAWN_TRY(ExtractResourcesBinding(resource_bindings));
+
+ std::vector<shaderc_spvc_resource_location_info> input_stage_locations;
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.GetInputStageLocationInfo(&input_stage_locations),
+ "Unable to get input stage location information from shader"));
+
+ for (const auto& input : input_stage_locations) {
+ if (mExecutionModel == SingleShaderStage::Vertex) {
+ if (input.location >= kMaxVertexAttributes) {
+ return DAWN_VALIDATION_ERROR("Attribute location over limits in the SPIRV");
+ }
+ mUsedVertexAttributes.set(input.location);
+ } else if (mExecutionModel == SingleShaderStage::Fragment) {
+ // Without a location qualifier on vertex inputs, spirv_cross::CompilerMSL gives
+ // them all the location 0, causing a compile error.
+ if (!input.has_location) {
+ return DAWN_VALIDATION_ERROR("Need location qualifier on fragment input");
+ }
+ }
+ }
+
+ std::vector<shaderc_spvc_resource_location_info> output_stage_locations;
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.GetOutputStageLocationInfo(&output_stage_locations),
+ "Unable to get output stage location information from shader"));
+
+ for (const auto& output : output_stage_locations) {
+ if (mExecutionModel == SingleShaderStage::Vertex) {
+ // Without a location qualifier on vertex outputs, spirv_cross::CompilerMSL
+ // gives them all the location 0, causing a compile error.
+ if (!output.has_location) {
+ return DAWN_VALIDATION_ERROR("Need location qualifier on vertex output");
+ }
+ } else if (mExecutionModel == SingleShaderStage::Fragment) {
+ if (output.location >= kMaxColorAttachments) {
+ return DAWN_VALIDATION_ERROR(
+ "Fragment output location over limits in the SPIRV");
+ }
+ }
+ }
+
+ if (mExecutionModel == SingleShaderStage::Fragment) {
+ std::vector<shaderc_spvc_resource_type_info> output_types;
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.GetOutputStageTypeInfo(&output_types),
+ "Unable to get output stage type information from shader"));
+
+ for (const auto& output : output_types) {
+ if (output.type == shaderc_spvc_texture_format_type_other) {
+ return DAWN_VALIDATION_ERROR("Unexpected Fragment output type");
+ }
+ mFragmentOutputFormatBaseTypes[output.location] = ToDawnFormatType(output.type);
+ }
+ }
+ return {};
+ }
+
+ MaybeError ShaderModuleBase::ExtractSpirvInfoWithSpirvCross(
+ const spirv_cross::Compiler& compiler) {
// TODO(cwallez@chromium.org): make errors here creation errors
// currently errors here do not prevent the shadermodule from being used
const auto& resources = compiler.get_shader_resources();
@@ -149,37 +530,45 @@ namespace dawn_native {
break;
default:
UNREACHABLE();
+ return DAWN_VALIDATION_ERROR("Unexpected shader execution model");
}
if (resources.push_constant_buffers.size() > 0) {
- GetDevice()->HandleError(wgpu::ErrorType::Validation,
- "Push constants aren't supported.");
+ return DAWN_VALIDATION_ERROR("Push constants aren't supported.");
}
// Fill in bindingInfo with the SPIRV bindings
- auto ExtractResourcesBinding = [this](const spirv_cross::SmallVector<spirv_cross::Resource>&
- resources,
- const spirv_cross::Compiler& compiler,
- wgpu::BindingType bindingType) {
+ auto ExtractResourcesBinding =
+ [this](const spirv_cross::SmallVector<spirv_cross::Resource>& resources,
+ const spirv_cross::Compiler& compiler,
+ wgpu::BindingType bindingType) -> MaybeError {
for (const auto& resource : resources) {
- ASSERT(compiler.get_decoration_bitset(resource.id).get(spv::DecorationBinding));
- ASSERT(
- compiler.get_decoration_bitset(resource.id).get(spv::DecorationDescriptorSet));
+ if (!compiler.get_decoration_bitset(resource.id).get(spv::DecorationBinding)) {
+ return DAWN_VALIDATION_ERROR("No Binding decoration set for resource");
+ }
- uint32_t binding = compiler.get_decoration(resource.id, spv::DecorationBinding);
+ if (!compiler.get_decoration_bitset(resource.id)
+ .get(spv::DecorationDescriptorSet)) {
+ return DAWN_VALIDATION_ERROR("No Descriptor Decoration set for resource");
+ }
+
+ BindingNumber bindingNumber(
+ compiler.get_decoration(resource.id, spv::DecorationBinding));
uint32_t set = compiler.get_decoration(resource.id, spv::DecorationDescriptorSet);
- if (binding >= kMaxBindingsPerGroup || set >= kMaxBindGroups) {
- GetDevice()->HandleError(wgpu::ErrorType::Validation,
- "Binding over limits in the SPIRV");
- continue;
+ if (set >= kMaxBindGroups) {
+ return DAWN_VALIDATION_ERROR("Bind group index over limits in the SPIRV");
+ }
+
+ const auto& it = mBindingInfo[set].emplace(bindingNumber, ShaderBindingInfo{});
+ if (!it.second) {
+ return DAWN_VALIDATION_ERROR("Shader has duplicate bindings");
}
- BindingInfo* info = &mBindingInfo[set][binding];
- *info = {};
- info->used = true;
+ ShaderBindingInfo* info = &it.first->second;
info->id = resource.id;
info->base_type_id = resource.base_type_id;
+
switch (bindingType) {
case wgpu::BindingType::SampledTexture: {
spirv_cross::SPIRType::ImageType imageType =
@@ -193,87 +582,125 @@ namespace dawn_native {
info->textureComponentType =
SpirvCrossBaseTypeToFormatType(textureComponentType);
info->type = bindingType;
- } break;
+ break;
+ }
case wgpu::BindingType::StorageBuffer: {
- // Differentiate between readonly storage bindings and writable ones based
- // on the NonWritable decoration
+ // Differentiate between readonly storage bindings and writable ones
+ // based on the NonWritable decoration
spirv_cross::Bitset flags = compiler.get_buffer_block_flags(resource.id);
if (flags.get(spv::DecorationNonWritable)) {
info->type = wgpu::BindingType::ReadonlyStorageBuffer;
} else {
info->type = wgpu::BindingType::StorageBuffer;
}
- } break;
+ break;
+ }
+ case wgpu::BindingType::StorageTexture: {
+ spirv_cross::Bitset flags = compiler.get_decoration_bitset(resource.id);
+ if (flags.get(spv::DecorationNonReadable)) {
+ info->type = wgpu::BindingType::WriteonlyStorageTexture;
+ } else if (flags.get(spv::DecorationNonWritable)) {
+ info->type = wgpu::BindingType::ReadonlyStorageTexture;
+ } else {
+ info->type = wgpu::BindingType::StorageTexture;
+ }
+
+ spirv_cross::SPIRType::ImageType imageType =
+ compiler.get_type(info->base_type_id).image;
+ wgpu::TextureFormat storageTextureFormat =
+ ToWGPUTextureFormat(imageType.format);
+ if (storageTextureFormat == wgpu::TextureFormat::Undefined) {
+ return DAWN_VALIDATION_ERROR(
+ "Invalid image format declaration on storage image");
+ }
+ const Format& format =
+ GetDevice()->GetValidInternalFormat(storageTextureFormat);
+ if (!format.supportsStorageUsage) {
+ return DAWN_VALIDATION_ERROR(
+ "The storage texture format is not supported");
+ }
+ info->multisampled = imageType.ms;
+ info->storageTextureFormat = storageTextureFormat;
+ info->textureDimension =
+ SpirvDimToTextureViewDimension(imageType.dim, imageType.arrayed);
+ break;
+ }
default:
info->type = bindingType;
}
}
+ return {};
};
- ExtractResourcesBinding(resources.uniform_buffers, compiler,
- wgpu::BindingType::UniformBuffer);
- ExtractResourcesBinding(resources.separate_images, compiler,
- wgpu::BindingType::SampledTexture);
- ExtractResourcesBinding(resources.separate_samplers, compiler, wgpu::BindingType::Sampler);
- ExtractResourcesBinding(resources.storage_buffers, compiler,
- wgpu::BindingType::StorageBuffer);
+ DAWN_TRY(ExtractResourcesBinding(resources.uniform_buffers, compiler,
+ wgpu::BindingType::UniformBuffer));
+ DAWN_TRY(ExtractResourcesBinding(resources.separate_images, compiler,
+ wgpu::BindingType::SampledTexture));
+ DAWN_TRY(ExtractResourcesBinding(resources.separate_samplers, compiler,
+ wgpu::BindingType::Sampler));
+ DAWN_TRY(ExtractResourcesBinding(resources.storage_buffers, compiler,
+ wgpu::BindingType::StorageBuffer));
+ DAWN_TRY(ExtractResourcesBinding(resources.storage_images, compiler,
+ wgpu::BindingType::StorageTexture));
// Extract the vertex attributes
if (mExecutionModel == SingleShaderStage::Vertex) {
for (const auto& attrib : resources.stage_inputs) {
- ASSERT(compiler.get_decoration_bitset(attrib.id).get(spv::DecorationLocation));
+ if (!(compiler.get_decoration_bitset(attrib.id).get(spv::DecorationLocation))) {
+ return DAWN_VALIDATION_ERROR(
+ "Unable to find Location decoration for Vertex input");
+ }
uint32_t location = compiler.get_decoration(attrib.id, spv::DecorationLocation);
if (location >= kMaxVertexAttributes) {
- device->HandleError(wgpu::ErrorType::Validation,
- "Attribute location over limits in the SPIRV");
- return;
+ return DAWN_VALIDATION_ERROR("Attribute location over limits in the SPIRV");
}
mUsedVertexAttributes.set(location);
}
- // Without a location qualifier on vertex outputs, spirv_cross::CompilerMSL gives them
- // all the location 0, causing a compile error.
+ // Without a location qualifier on vertex outputs, spirv_cross::CompilerMSL gives
+ // them all the location 0, causing a compile error.
for (const auto& attrib : resources.stage_outputs) {
if (!compiler.get_decoration_bitset(attrib.id).get(spv::DecorationLocation)) {
- device->HandleError(wgpu::ErrorType::Validation,
- "Need location qualifier on vertex output");
- return;
+ return DAWN_VALIDATION_ERROR("Need location qualifier on vertex output");
}
}
}
if (mExecutionModel == SingleShaderStage::Fragment) {
- // Without a location qualifier on vertex inputs, spirv_cross::CompilerMSL gives them
- // all the location 0, causing a compile error.
+ // Without a location qualifier on vertex inputs, spirv_cross::CompilerMSL gives
+ // them all the location 0, causing a compile error.
for (const auto& attrib : resources.stage_inputs) {
if (!compiler.get_decoration_bitset(attrib.id).get(spv::DecorationLocation)) {
- device->HandleError(wgpu::ErrorType::Validation,
- "Need location qualifier on fragment input");
- return;
+ return DAWN_VALIDATION_ERROR("Need location qualifier on fragment input");
}
}
for (const auto& fragmentOutput : resources.stage_outputs) {
- ASSERT(
- compiler.get_decoration_bitset(fragmentOutput.id).get(spv::DecorationLocation));
+ if (!compiler.get_decoration_bitset(fragmentOutput.id)
+ .get(spv::DecorationLocation)) {
+ return DAWN_VALIDATION_ERROR(
+ "Unable to find Location decoration for Fragment output");
+ }
uint32_t location =
compiler.get_decoration(fragmentOutput.id, spv::DecorationLocation);
if (location >= kMaxColorAttachments) {
- device->HandleError(wgpu::ErrorType::Validation,
- "Fragment output location over limits in the SPIRV");
- return;
+ return DAWN_VALIDATION_ERROR(
+ "Fragment output location over limits in the SPIRV");
}
spirv_cross::SPIRType::BaseType shaderFragmentOutputBaseType =
compiler.get_type(fragmentOutput.base_type_id).basetype;
Format::Type formatType =
SpirvCrossBaseTypeToFormatType(shaderFragmentOutputBaseType);
- ASSERT(formatType != Format::Type::Other);
+ if (formatType == Format::Type::Other) {
+ return DAWN_VALIDATION_ERROR("Unexpected Fragment output type");
+ };
mFragmentOutputFormatBaseTypes[location] = formatType;
}
}
+ return {};
}
const ShaderModuleBase::ModuleBindingInfo& ShaderModuleBase::GetBindingInfo() const {
@@ -307,10 +734,8 @@ namespace dawn_native {
}
for (uint32_t group : IterateBitSet(~layout->GetBindGroupLayoutsMask())) {
- for (size_t i = 0; i < kMaxBindingsPerGroup; ++i) {
- if (mBindingInfo[group][i].used) {
- return false;
- }
+ if (mBindingInfo[group].size() > 0) {
+ return false;
}
}
@@ -322,42 +747,74 @@ namespace dawn_native {
const BindGroupLayoutBase* layout) const {
ASSERT(!IsError());
- const auto& layoutInfo = layout->GetBindingInfo();
- for (size_t i = 0; i < kMaxBindingsPerGroup; ++i) {
- const auto& moduleInfo = mBindingInfo[group][i];
- const auto& layoutBindingType = layoutInfo.types[i];
+ const BindGroupLayoutBase::BindingMap& bindingMap = layout->GetBindingMap();
+
+ // Iterate over all bindings used by this group in the shader, and find the
+ // corresponding binding in the BindGroupLayout, if it exists.
+ for (const auto& it : mBindingInfo[group]) {
+ BindingNumber bindingNumber = it.first;
+ const ShaderBindingInfo& moduleInfo = it.second;
- if (!moduleInfo.used) {
- continue;
+ const auto& bindingIt = bindingMap.find(bindingNumber);
+ if (bindingIt == bindingMap.end()) {
+ return false;
}
+ BindingIndex bindingIndex(bindingIt->second);
+
+ const BindingInfo& bindingInfo = layout->GetBindingInfo(bindingIndex);
- if (layoutBindingType != moduleInfo.type) {
+ if (bindingInfo.type != moduleInfo.type) {
// Binding mismatch between shader and bind group is invalid. For example, a
// writable binding in the shader with a readonly storage buffer in the bind group
// layout is invalid. However, a readonly binding in the shader with a writable
// storage buffer in the bind group layout is valid.
bool validBindingConversion =
- layoutBindingType == wgpu::BindingType::StorageBuffer &&
+ bindingInfo.type == wgpu::BindingType::StorageBuffer &&
moduleInfo.type == wgpu::BindingType::ReadonlyStorageBuffer;
if (!validBindingConversion) {
return false;
}
}
- if ((layoutInfo.visibilities[i] & StageBit(mExecutionModel)) == 0) {
+ if ((bindingInfo.visibility & StageBit(mExecutionModel)) == 0) {
return false;
}
- if (layoutBindingType == wgpu::BindingType::SampledTexture) {
- Format::Type layoutTextureComponentType =
- Format::TextureComponentTypeToFormatType(layoutInfo.textureComponentTypes[i]);
- if (layoutTextureComponentType != moduleInfo.textureComponentType) {
- return false;
+ switch (bindingInfo.type) {
+ case wgpu::BindingType::SampledTexture: {
+ if (bindingInfo.textureComponentType != moduleInfo.textureComponentType) {
+ return false;
+ }
+
+ if (bindingInfo.textureDimension != moduleInfo.textureDimension) {
+ return false;
+ }
+ break;
}
- if (layoutInfo.textureDimensions[i] != moduleInfo.textureDimension) {
- return false;
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture: {
+ ASSERT(bindingInfo.storageTextureFormat != wgpu::TextureFormat::Undefined);
+ ASSERT(moduleInfo.storageTextureFormat != wgpu::TextureFormat::Undefined);
+ if (bindingInfo.storageTextureFormat != moduleInfo.storageTextureFormat) {
+ return false;
+ }
+ if (bindingInfo.textureDimension != moduleInfo.textureDimension) {
+ return false;
+ }
+ break;
}
+
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::Sampler:
+ break;
+
+ case wgpu::BindingType::StorageTexture:
+ default:
+ UNREACHABLE();
+ return false;
}
}
@@ -379,4 +836,18 @@ namespace dawn_native {
return a->mCode == b->mCode;
}
+ MaybeError ShaderModuleBase::CheckSpvcSuccess(shaderc_spvc_status status,
+ const char* error_msg) {
+ if (status != shaderc_spvc_status_success) {
+ return DAWN_VALIDATION_ERROR(error_msg);
+ }
+ return {};
+ }
+
+ shaderc_spvc::CompileOptions ShaderModuleBase::GetCompileOptions() {
+ shaderc_spvc::CompileOptions options;
+ options.SetValidate(GetDevice()->IsValidationEnabled());
+ return options;
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
index da69c4d98c6..0653bbfd17e 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
@@ -16,6 +16,7 @@
#define DAWNNATIVE_SHADERMODULE_H_
#include "common/Constants.h"
+#include "dawn_native/BindingInfo.h"
#include "dawn_native/CachedObject.h"
#include "dawn_native/Error.h"
#include "dawn_native/Format.h"
@@ -28,6 +29,7 @@
#include <array>
#include <bitset>
+#include <map>
#include <vector>
namespace spirv_cross {
@@ -46,21 +48,21 @@ namespace dawn_native {
static ShaderModuleBase* MakeError(DeviceBase* device);
- void ExtractSpirvInfo(const spirv_cross::Compiler& compiler);
+ MaybeError ExtractSpirvInfo(const spirv_cross::Compiler& compiler);
- struct BindingInfo {
+ struct ShaderBindingInfo : BindingInfo {
// The SPIRV ID of the resource.
uint32_t id;
uint32_t base_type_id;
- wgpu::BindingType type;
- // Match the defaults in BindGroupLayoutDescriptor
- wgpu::TextureViewDimension textureDimension = wgpu::TextureViewDimension::Undefined;
- Format::Type textureComponentType = Format::Type::Float;
- bool multisampled = false;
- bool used = false;
+
+ private:
+ // Disallow access to unused members.
+ using BindingInfo::hasDynamicOffset;
+ using BindingInfo::visibility;
};
+
using ModuleBindingInfo =
- std::array<std::array<BindingInfo, kMaxBindingsPerGroup>, kMaxBindGroups>;
+ std::array<std::map<BindingNumber, ShaderBindingInfo>, kMaxBindGroups>;
const ModuleBindingInfo& GetBindingInfo() const;
const std::bitset<kMaxVertexAttributes>& GetUsedVertexAttributes() const;
@@ -81,7 +83,14 @@ namespace dawn_native {
bool operator()(const ShaderModuleBase* a, const ShaderModuleBase* b) const;
};
+ shaderc_spvc::Context* GetContext() {
+ return &mSpvcContext;
+ }
+
protected:
+ static MaybeError CheckSpvcSuccess(shaderc_spvc_status status, const char* error_msg);
+ shaderc_spvc::CompileOptions GetCompileOptions();
+
shaderc_spvc::Context mSpvcContext;
private:
@@ -89,6 +98,11 @@ namespace dawn_native {
bool IsCompatibleWithBindGroupLayout(size_t group, const BindGroupLayoutBase* layout) const;
+        // Different implementations of reflection into the shader, depending on
+        // whether we are using spvc or directly accessing spirv-cross.
+ MaybeError ExtractSpirvInfoWithSpvc();
+ MaybeError ExtractSpirvInfoWithSpirvCross(const spirv_cross::Compiler& compiler);
+
// TODO(cwallez@chromium.org): The code is only stored for deduplication. We could maybe
// store a cryptographic hash of the code instead?
std::vector<uint32_t> mCode;
diff --git a/chromium/third_party/dawn/src/dawn_native/Surface.cpp b/chromium/third_party/dawn/src/dawn_native/Surface.cpp
new file mode 100644
index 00000000000..382bb71c6a5
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/Surface.cpp
@@ -0,0 +1,193 @@
+// Copyright 2020 the Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/Surface.h"
+
+#include "common/Platform.h"
+#include "dawn_native/Instance.h"
+#include "dawn_native/SwapChain.h"
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+# include "common/windows_with_undefs.h"
+#endif // DAWN_PLATFORM_WINDOWS
+
+#if defined(DAWN_USE_X11)
+# include "common/xlib_with_undefs.h"
+#endif // defined(DAWN_USE_X11)
+
+namespace dawn_native {
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ bool InheritsFromCAMetalLayer(void* obj);
+#endif // defined(DAWN_ENABLE_BACKEND_METAL)
+
+ MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
+ const SurfaceDescriptor* descriptor) {
+ // TODO(cwallez@chromium.org): Have some type of helper to iterate over all the chained
+ // structures.
+ if (descriptor->nextInChain == nullptr) {
+ return DAWN_VALIDATION_ERROR("Surface cannot be created with just the base descriptor");
+ }
+
+ const ChainedStruct* chainedDescriptor = descriptor->nextInChain;
+ if (chainedDescriptor->nextInChain != nullptr) {
+ return DAWN_VALIDATION_ERROR("Cannot specify two windows for a single surface");
+ }
+
+ switch (chainedDescriptor->sType) {
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ case wgpu::SType::SurfaceDescriptorFromMetalLayer: {
+ const SurfaceDescriptorFromMetalLayer* metalDesc =
+ static_cast<const SurfaceDescriptorFromMetalLayer*>(chainedDescriptor);
+
+ // Check that the layer is a CAMetalLayer (or a derived class).
+ if (!InheritsFromCAMetalLayer(metalDesc->layer)) {
+ return DAWN_VALIDATION_ERROR("layer must be a CAMetalLayer");
+ }
+ break;
+ }
+#endif // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+ case wgpu::SType::SurfaceDescriptorFromWindowsHWND: {
+ const SurfaceDescriptorFromWindowsHWND* hwndDesc =
+ static_cast<const SurfaceDescriptorFromWindowsHWND*>(chainedDescriptor);
+
+ // It is not possible to validate an HINSTANCE.
+
+ // Validate the hwnd using the windows.h IsWindow function.
+ if (IsWindow(static_cast<HWND>(hwndDesc->hwnd)) == 0) {
+ return DAWN_VALIDATION_ERROR("Invalid HWND");
+ }
+ break;
+ }
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+
+#if defined(DAWN_USE_X11)
+ case wgpu::SType::SurfaceDescriptorFromXlib: {
+ const SurfaceDescriptorFromXlib* xDesc =
+ static_cast<const SurfaceDescriptorFromXlib*>(chainedDescriptor);
+
+ // It is not possible to validate an X Display.
+
+ // Check the validity of the window by calling a getter function on the window that
+                // returns a status code. If the window is bad the call returns a status of zero. We
+ // need to set a temporary X11 error handler while doing this because the default
+ // X11 error handler exits the program on any error.
+ XErrorHandler oldErrorHandler =
+ XSetErrorHandler([](Display*, XErrorEvent*) { return 0; });
+ XWindowAttributes attributes;
+ int status = XGetWindowAttributes(reinterpret_cast<Display*>(xDesc->display),
+ xDesc->window, &attributes);
+ XSetErrorHandler(oldErrorHandler);
+
+ if (status == 0) {
+ return DAWN_VALIDATION_ERROR("Invalid X Window");
+ }
+ break;
+ }
+#endif // defined(DAWN_USE_X11)
+
+ case wgpu::SType::SurfaceDescriptorFromHTMLCanvasId:
+ default:
+ return DAWN_VALIDATION_ERROR("Unsupported sType");
+ }
+
+ return {};
+ }
+
+ Surface::Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor)
+ : mInstance(instance) {
+ ASSERT(descriptor->nextInChain != nullptr);
+ const ChainedStruct* chainedDescriptor = descriptor->nextInChain;
+
+ switch (chainedDescriptor->sType) {
+ case wgpu::SType::SurfaceDescriptorFromMetalLayer: {
+ const SurfaceDescriptorFromMetalLayer* metalDesc =
+ static_cast<const SurfaceDescriptorFromMetalLayer*>(chainedDescriptor);
+ mType = Type::MetalLayer;
+ mMetalLayer = metalDesc->layer;
+ break;
+ }
+
+ case wgpu::SType::SurfaceDescriptorFromWindowsHWND: {
+ const SurfaceDescriptorFromWindowsHWND* hwndDesc =
+ static_cast<const SurfaceDescriptorFromWindowsHWND*>(chainedDescriptor);
+ mType = Type::WindowsHWND;
+ mHInstance = hwndDesc->hinstance;
+ mHWND = hwndDesc->hwnd;
+ break;
+ }
+
+ case wgpu::SType::SurfaceDescriptorFromXlib: {
+ const SurfaceDescriptorFromXlib* xDesc =
+ static_cast<const SurfaceDescriptorFromXlib*>(chainedDescriptor);
+ mType = Type::Xlib;
+ mXDisplay = xDesc->display;
+ mXWindow = xDesc->window;
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ Surface::~Surface() {
+ if (mSwapChain != nullptr) {
+ mSwapChain->DetachFromSurface();
+ mSwapChain = nullptr;
+ }
+ }
+
+ NewSwapChainBase* Surface::GetAttachedSwapChain() const {
+ return mSwapChain;
+ }
+
+ void Surface::SetAttachedSwapChain(NewSwapChainBase* swapChain) {
+ mSwapChain = swapChain;
+ }
+
+ InstanceBase* Surface::GetInstance() {
+ return mInstance.Get();
+ }
+
+ Surface::Type Surface::GetType() const {
+ return mType;
+ }
+
+ void* Surface::GetMetalLayer() const {
+ ASSERT(mType == Type::MetalLayer);
+ return mMetalLayer;
+ }
+
+ void* Surface::GetHInstance() const {
+ ASSERT(mType == Type::WindowsHWND);
+ return mHInstance;
+ }
+ void* Surface::GetHWND() const {
+ ASSERT(mType == Type::WindowsHWND);
+ return mHWND;
+ }
+
+ void* Surface::GetXDisplay() const {
+ ASSERT(mType == Type::Xlib);
+ return mXDisplay;
+ }
+ uint32_t Surface::GetXWindow() const {
+ ASSERT(mType == Type::Xlib);
+ return mXWindow;
+ }
+
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Surface.h b/chromium/third_party/dawn/src/dawn_native/Surface.h
new file mode 100644
index 00000000000..b33cf8a80ec
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/Surface.h
@@ -0,0 +1,79 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_SURFACE_H_
+#define DAWNNATIVE_SURFACE_H_
+
+#include "dawn_native/Error.h"
+#include "dawn_native/Forward.h"
+#include "dawn_native/RefCounted.h"
+
+#include "dawn_native/dawn_platform.h"
+
+namespace dawn_native {
+
+ MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
+ const SurfaceDescriptor* descriptor);
+
+    // A surface is a sum type of all the kinds of windows Dawn supports. The OS-specific types
+ // aren't used because they would cause compilation errors on other OSes (or require
+ // ObjectiveC).
+ // The surface is also used to store the current swapchain so that we can detach it when it is
+ // replaced.
+ class Surface final : public RefCounted {
+ public:
+ Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor);
+ ~Surface();
+
+ void SetAttachedSwapChain(NewSwapChainBase* swapChain);
+ NewSwapChainBase* GetAttachedSwapChain() const;
+
+ // These are valid to call on all Surfaces.
+ enum class Type { MetalLayer, WindowsHWND, Xlib };
+ Type GetType() const;
+ InstanceBase* GetInstance();
+
+ // Valid to call if the type is MetalLayer
+ void* GetMetalLayer() const;
+
+ // Valid to call if the type is WindowsHWND
+ void* GetHInstance() const;
+ void* GetHWND() const;
+
+        // Valid to call if the type is Xlib
+ void* GetXDisplay() const;
+ uint32_t GetXWindow() const;
+
+ private:
+ Ref<InstanceBase> mInstance;
+ Type mType;
+
+ // The swapchain will set this to null when it is destroyed.
+ NewSwapChainBase* mSwapChain = nullptr;
+
+ // MetalLayer
+ void* mMetalLayer = nullptr;
+
+ // WindowsHwnd
+ void* mHInstance = nullptr;
+ void* mHWND = nullptr;
+
+ // Xlib
+ void* mXDisplay = nullptr;
+ uint32_t mXWindow = 0;
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_SURFACE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Surface_metal.mm b/chromium/third_party/dawn/src/dawn_native/Surface_metal.mm
new file mode 100644
index 00000000000..9989674fe6a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/Surface_metal.mm
@@ -0,0 +1,30 @@
+// Copyright 2020 the Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Contains a helper function for Surface.cpp that needs to be written in ObjectiveC.
+
+#if !defined(DAWN_ENABLE_BACKEND_METAL)
+# error "Surface_metal.mm requires the Metal backend to be enabled."
+#endif // !defined(DAWN_ENABLE_BACKEND_METAL)
+
+#import <QuartzCore/CAMetalLayer.h>
+
+namespace dawn_native {
+
+ bool InheritsFromCAMetalLayer(void* obj) {
+ id<NSObject> object = static_cast<id>(obj);
+ return [object isKindOfClass:[CAMetalLayer class]];
+ }
+
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp b/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
index 37549d38c53..86a9c89d2f4 100644
--- a/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
@@ -14,7 +14,10 @@
#include "dawn_native/SwapChain.h"
+#include "common/Constants.h"
+#include "dawn_native/Adapter.h"
#include "dawn_native/Device.h"
+#include "dawn_native/Surface.h"
#include "dawn_native/Texture.h"
#include "dawn_native/ValidationUtils_autogen.h"
@@ -28,40 +31,88 @@ namespace dawn_native {
}
private:
- TextureBase* GetNextTextureImpl(const TextureDescriptor*) override {
- UNREACHABLE();
+ void Configure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) override {
+ GetDevice()->ConsumedError(DAWN_VALIDATION_ERROR("error swapchain"));
}
- MaybeError OnBeforePresent(TextureBase* texture) override {
- UNREACHABLE();
+ TextureViewBase* GetCurrentTextureView() override {
+ GetDevice()->ConsumedError(DAWN_VALIDATION_ERROR("error swapchain"));
+ return TextureViewBase::MakeError(GetDevice());
+ }
+
+ void Present() override {
+ GetDevice()->ConsumedError(DAWN_VALIDATION_ERROR("error swapchain"));
}
};
} // anonymous namespace
MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
+ const Surface* surface,
const SwapChainDescriptor* descriptor) {
- if (descriptor->implementation == 0) {
- return DAWN_VALIDATION_ERROR("Null implementation for the swapchain");
- }
+ if (descriptor->implementation != 0) {
+ if (surface != nullptr) {
+ return DAWN_VALIDATION_ERROR(
+ "Exactly one of surface or implementation must be set");
+ }
+
+ DawnSwapChainImplementation* impl =
+ reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation);
+
+ if (!impl->Init || !impl->Destroy || !impl->Configure || !impl->GetNextTexture ||
+ !impl->Present) {
+ return DAWN_VALIDATION_ERROR("Implementation is incomplete");
+ }
- DawnSwapChainImplementation* impl =
- reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation);
+ } else {
+ if (surface == nullptr) {
+ return DAWN_VALIDATION_ERROR(
+ "At least one of surface or implementation must be set");
+ }
+
+ DAWN_TRY(ValidatePresentMode(descriptor->presentMode));
+
+ // TODO(cwallez@chromium.org): Lift this restriction once
+ // wgpu::Instance::GetPreferredSurfaceFormat is implemented.
+ if (descriptor->format != wgpu::TextureFormat::BGRA8Unorm) {
+ return DAWN_VALIDATION_ERROR("Format must (currently) be BGRA8Unorm");
+ }
- if (!impl->Init || !impl->Destroy || !impl->Configure || !impl->GetNextTexture ||
- !impl->Present) {
- return DAWN_VALIDATION_ERROR("Implementation is incomplete");
+ if (descriptor->usage != wgpu::TextureUsage::OutputAttachment) {
+ return DAWN_VALIDATION_ERROR("Usage must (currently) be OutputAttachment");
+ }
+
+ if (descriptor->width == 0 || descriptor->height == 0) {
+ return DAWN_VALIDATION_ERROR("Swapchain size can't be empty");
+ }
+
+ if (descriptor->width > kMaxTextureSize || descriptor->height > kMaxTextureSize) {
+ return DAWN_VALIDATION_ERROR("Swapchain size too big");
+ }
}
return {};
}
- // SwapChain
+ TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain) {
+ TextureDescriptor desc;
+ desc.usage = swapChain->GetUsage();
+ desc.dimension = wgpu::TextureDimension::e2D;
+ desc.size = {swapChain->GetWidth(), swapChain->GetHeight(), 1};
+ desc.arrayLayerCount = 1;
+ desc.format = swapChain->GetFormat();
+ desc.mipLevelCount = 1;
+ desc.sampleCount = 1;
+
+ return desc;
+ }
- SwapChainBase::SwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor)
- : ObjectBase(device),
- mImplementation(
- *reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation)) {
+ // SwapChainBase
+
+ SwapChainBase::SwapChainBase(DeviceBase* device) : ObjectBase(device) {
}
SwapChainBase::SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag)
@@ -69,10 +120,6 @@ namespace dawn_native {
}
SwapChainBase::~SwapChainBase() {
- if (!IsError()) {
- const auto& im = GetImplementation();
- im.Destroy(im.userData);
- }
}
// static
@@ -80,10 +127,25 @@ namespace dawn_native {
return new ErrorSwapChain(device);
}
- void SwapChainBase::Configure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) {
+ // OldSwapChainBase
+
+ OldSwapChainBase::OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor)
+ : SwapChainBase(device),
+ mImplementation(
+ *reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation)) {
+ }
+
+ OldSwapChainBase::~OldSwapChainBase() {
+ if (!IsError()) {
+ const auto& im = GetImplementation();
+ im.Destroy(im.userData);
+ }
+ }
+
+ void OldSwapChainBase::Configure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) {
if (GetDevice()->ConsumedError(ValidateConfigure(format, allowedUsage, width, height))) {
return;
}
@@ -99,7 +161,7 @@ namespace dawn_native {
static_cast<WGPUTextureUsage>(allowedUsage), width, height);
}
- TextureViewBase* SwapChainBase::GetCurrentTextureView() {
+ TextureViewBase* OldSwapChainBase::GetCurrentTextureView() {
if (GetDevice()->ConsumedError(ValidateGetCurrentTextureView())) {
return TextureViewBase::MakeError(GetDevice());
}
@@ -133,7 +195,7 @@ namespace dawn_native {
return mCurrentTextureView.Get();
}
- void SwapChainBase::Present() {
+ void OldSwapChainBase::Present() {
if (GetDevice()->ConsumedError(ValidatePresent())) {
return;
}
@@ -149,15 +211,16 @@ namespace dawn_native {
mCurrentTextureView = nullptr;
}
- const DawnSwapChainImplementation& SwapChainBase::GetImplementation() {
+ const DawnSwapChainImplementation& OldSwapChainBase::GetImplementation() {
ASSERT(!IsError());
return mImplementation;
}
- MaybeError SwapChainBase::ValidateConfigure(wgpu::TextureFormat format,
- wgpu::TextureUsage allowedUsage,
- uint32_t width,
- uint32_t height) const {
+ MaybeError OldSwapChainBase::ValidateConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
DAWN_TRY(ValidateTextureUsage(allowedUsage));
@@ -170,7 +233,8 @@ namespace dawn_native {
return {};
}
- MaybeError SwapChainBase::ValidateGetCurrentTextureView() const {
+ MaybeError OldSwapChainBase::ValidateGetCurrentTextureView() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
if (mWidth == 0) {
@@ -181,7 +245,8 @@ namespace dawn_native {
return {};
}
- MaybeError SwapChainBase::ValidatePresent() const {
+ MaybeError OldSwapChainBase::ValidatePresent() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
if (mCurrentTextureView.Get() == nullptr) {
@@ -192,4 +257,149 @@ namespace dawn_native {
return {};
}
+ // Implementation of NewSwapChainBase
+
+ NewSwapChainBase::NewSwapChainBase(DeviceBase* device,
+ Surface* surface,
+ const SwapChainDescriptor* descriptor)
+ : SwapChainBase(device),
+ mAttached(true),
+ mWidth(descriptor->width),
+ mHeight(descriptor->height),
+ mFormat(descriptor->format),
+ mUsage(descriptor->usage),
+ mPresentMode(descriptor->presentMode),
+ mSurface(surface) {
+ }
+
+ NewSwapChainBase::~NewSwapChainBase() {
+ if (mCurrentTextureView.Get() != nullptr) {
+ ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
+ TextureBase::TextureState::Destroyed);
+ }
+
+ ASSERT(!mAttached);
+ ASSERT(mSurface == nullptr);
+ }
+
+ void NewSwapChainBase::DetachFromSurface() {
+ if (mAttached) {
+ DetachFromSurfaceImpl();
+ GetSurface()->SetAttachedSwapChain(nullptr);
+ mSurface = nullptr;
+ mAttached = false;
+ }
+ }
+
+ void NewSwapChainBase::Configure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) {
+ GetDevice()->ConsumedError(
+ DAWN_VALIDATION_ERROR("Configure is invalid for surface-based swapchains"));
+ }
+
+ TextureViewBase* NewSwapChainBase::GetCurrentTextureView() {
+ if (GetDevice()->ConsumedError(ValidateGetCurrentTextureView())) {
+ return TextureViewBase::MakeError(GetDevice());
+ }
+
+ if (mCurrentTextureView.Get() != nullptr) {
+ // Calling GetCurrentTextureView always returns a new reference so add it even when
+ // reusing the existing texture view.
+ mCurrentTextureView->Reference();
+ return mCurrentTextureView.Get();
+ }
+
+ TextureViewBase* view = nullptr;
+ if (GetDevice()->ConsumedError(GetCurrentTextureViewImpl(), &view)) {
+ return TextureViewBase::MakeError(GetDevice());
+ }
+
+ // Check that the return texture view matches exactly what was given for this descriptor.
+ ASSERT(view->GetTexture()->GetFormat().format == mFormat);
+ ASSERT((view->GetTexture()->GetUsage() & mUsage) == mUsage);
+ ASSERT(view->GetLevelCount() == 1);
+ ASSERT(view->GetLayerCount() == 1);
+ ASSERT(view->GetDimension() == wgpu::TextureViewDimension::e2D);
+ ASSERT(view->GetTexture()->GetMipLevelVirtualSize(view->GetBaseMipLevel()).width == mWidth);
+ ASSERT(view->GetTexture()->GetMipLevelVirtualSize(view->GetBaseMipLevel()).height ==
+ mHeight);
+
+ mCurrentTextureView = view;
+ return view;
+ }
+
+ void NewSwapChainBase::Present() {
+ if (GetDevice()->ConsumedError(ValidatePresent())) {
+ return;
+ }
+
+ if (GetDevice()->ConsumedError(PresentImpl())) {
+ return;
+ }
+
+ ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
+ TextureBase::TextureState::Destroyed);
+ mCurrentTextureView = nullptr;
+ }
+
+ uint32_t NewSwapChainBase::GetWidth() const {
+ return mWidth;
+ }
+
+ uint32_t NewSwapChainBase::GetHeight() const {
+ return mHeight;
+ }
+
+ wgpu::TextureFormat NewSwapChainBase::GetFormat() const {
+ return mFormat;
+ }
+
+ wgpu::TextureUsage NewSwapChainBase::GetUsage() const {
+ return mUsage;
+ }
+
+ wgpu::PresentMode NewSwapChainBase::GetPresentMode() const {
+ return mPresentMode;
+ }
+
+ Surface* NewSwapChainBase::GetSurface() const {
+ return mSurface;
+ }
+
+ bool NewSwapChainBase::IsAttached() const {
+ return mAttached;
+ }
+
+ wgpu::BackendType NewSwapChainBase::GetBackendType() const {
+ return GetDevice()->GetAdapter()->GetBackendType();
+ }
+
+ MaybeError NewSwapChainBase::ValidatePresent() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+
+ if (!mAttached) {
+ return DAWN_VALIDATION_ERROR("Presenting on detached swapchain");
+ }
+
+ if (mCurrentTextureView.Get() == nullptr) {
+ return DAWN_VALIDATION_ERROR("Presenting without prior GetCurrentTextureView");
+ }
+
+ return {};
+ }
+
+ MaybeError NewSwapChainBase::ValidateGetCurrentTextureView() const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+
+ if (!mAttached) {
+ return DAWN_VALIDATION_ERROR("Getting view on detached swapchain");
+ }
+
+ return {};
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/SwapChain.h b/chromium/third_party/dawn/src/dawn_native/SwapChain.h
index 7742293aec3..d1d0b7f2557 100644
--- a/chromium/third_party/dawn/src/dawn_native/SwapChain.h
+++ b/chromium/third_party/dawn/src/dawn_native/SwapChain.h
@@ -25,12 +25,35 @@
namespace dawn_native {
MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
+ const Surface* surface,
const SwapChainDescriptor* descriptor);
+ TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain);
+
class SwapChainBase : public ObjectBase {
public:
- SwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor);
- ~SwapChainBase();
+ SwapChainBase(DeviceBase* device);
+ virtual ~SwapChainBase();
+
+ static SwapChainBase* MakeError(DeviceBase* device);
+
+ // Dawn API
+ virtual void Configure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) = 0;
+ virtual TextureViewBase* GetCurrentTextureView() = 0;
+ virtual void Present() = 0;
+
+ protected:
+ SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ };
+
+ // The base class for implementation-based SwapChains that are deprecated.
+ class OldSwapChainBase : public SwapChainBase {
+ public:
+ OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor);
+ ~OldSwapChainBase();
static SwapChainBase* MakeError(DeviceBase* device);
@@ -38,13 +61,11 @@ namespace dawn_native {
void Configure(wgpu::TextureFormat format,
wgpu::TextureUsage allowedUsage,
uint32_t width,
- uint32_t height);
- TextureViewBase* GetCurrentTextureView();
- void Present();
+ uint32_t height) override;
+ TextureViewBase* GetCurrentTextureView() override;
+ void Present() override;
protected:
- SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
const DawnSwapChainImplementation& GetImplementation();
virtual TextureBase* GetNextTextureImpl(const TextureDescriptor*) = 0;
virtual MaybeError OnBeforePresent(TextureBase* texture) = 0;
@@ -66,6 +87,76 @@ namespace dawn_native {
Ref<TextureViewBase> mCurrentTextureView;
};
+ // The base class for surface-based SwapChains that aren't ready yet.
+ class NewSwapChainBase : public SwapChainBase {
+ public:
+ NewSwapChainBase(DeviceBase* device,
+ Surface* surface,
+ const SwapChainDescriptor* descriptor);
+ ~NewSwapChainBase() override;
+
+ // This is called when the swapchain is detached for any reason:
+ //
+ // - The swapchain is being destroyed.
+ // - The surface it is attached to is being destroyed.
+ // - The swapchain is being replaced by another one on the surface.
+ //
+ // The call for the old swapchain being replaced should be called inside the backend
+ // implementation of SwapChains. This is to allow them to acquire any resources before
+ // calling detach to make a seamless transition from the previous swapchain.
+ //
+ // Likewise the call for the swapchain being destroyed must be done in the backend's
+ // swapchain's destructor since C++ says it is UB to call virtual methods in the base class
+ // destructor.
+ void DetachFromSurface();
+
+ // Dawn API
+ void Configure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
+ uint32_t width,
+ uint32_t height) override;
+ TextureViewBase* GetCurrentTextureView() override;
+ void Present() override;
+
+ uint32_t GetWidth() const;
+ uint32_t GetHeight() const;
+ wgpu::TextureFormat GetFormat() const;
+ wgpu::TextureUsage GetUsage() const;
+ wgpu::PresentMode GetPresentMode() const;
+ Surface* GetSurface() const;
+ bool IsAttached() const;
+ wgpu::BackendType GetBackendType() const;
+
+ private:
+ bool mAttached;
+ uint32_t mWidth;
+ uint32_t mHeight;
+ wgpu::TextureFormat mFormat;
+ wgpu::TextureUsage mUsage;
+ wgpu::PresentMode mPresentMode;
+
+ // This is a weak reference to the surface. If the surface is destroyed it will call
+ // DetachFromSurface and mSurface will be updated to nullptr.
+ Surface* mSurface = nullptr;
+ Ref<TextureViewBase> mCurrentTextureView;
+
+ MaybeError ValidatePresent() const;
+ MaybeError ValidateGetCurrentTextureView() const;
+
+ // GetCurrentTextureViewImpl and PresentImpl are guaranteed to be called in an interleaved
+ // manner, starting with GetCurrentTextureViewImpl.
+
+ // The returned texture view must match the swapchain descriptor exactly.
+ virtual ResultOrError<TextureViewBase*> GetCurrentTextureViewImpl() = 0;
+        // The call to present must destroy the current view's texture so further accesses to it are
+ // invalid.
+ virtual MaybeError PresentImpl() = 0;
+
+ // Guaranteed to be called exactly once during the lifetime of the SwapChain. After it is
+ // called no other virtual method can be called.
+ virtual void DetachFromSurfaceImpl() = 0;
+ };
+
} // namespace dawn_native
#endif // DAWNNATIVE_SWAPCHAIN_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.cpp b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
index b6a50d67c26..7b264c1ec18 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
@@ -108,6 +108,11 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR(
"The sample counts of the textures in BC formats must be 1.");
}
+
+ if (descriptor->usage & wgpu::TextureUsage::Storage) {
+ return DAWN_VALIDATION_ERROR(
+ "The sample counts of the storage textures must be 1.");
+ }
}
return {};
@@ -141,6 +146,10 @@ namespace dawn_native {
MaybeError ValidateTextureSize(const TextureDescriptor* descriptor, const Format* format) {
ASSERT(descriptor->size.width != 0 && descriptor->size.height != 0);
+ if (descriptor->size.width > kMaxTextureSize ||
+ descriptor->size.height > kMaxTextureSize) {
+ return DAWN_VALIDATION_ERROR("Texture max size exceeded");
+ }
if (Log2(std::max(descriptor->size.width, descriptor->size.height)) + 1 <
descriptor->mipLevelCount) {
@@ -153,6 +162,13 @@ namespace dawn_native {
"The size of the texture is incompatible with the texture format");
}
+ if (descriptor->arrayLayerCount > kMaxTexture2DArrayLayers) {
+ return DAWN_VALIDATION_ERROR("Texture 2D array layer count exceeded");
+ }
+ if (descriptor->mipLevelCount > kMaxTexture2DMipLevels) {
+ return DAWN_VALIDATION_ERROR("Max texture 2D mip level exceeded");
+ }
+
return {};
}
@@ -173,8 +189,9 @@ namespace dawn_native {
"Non-renderable format used with OutputAttachment usage");
}
- if (descriptor->usage & wgpu::TextureUsage::Storage) {
- return DAWN_VALIDATION_ERROR("storage textures aren't supported (yet)");
+ if (!format->supportsStorageUsage &&
+ (descriptor->usage & wgpu::TextureUsage::Storage)) {
+ return DAWN_VALIDATION_ERROR("Format cannot be used in storage textures");
}
return {};
diff --git a/chromium/third_party/dawn/src/dawn_native/Toggles.cpp b/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
index 8f5247fdf33..225918c26f9 100644
--- a/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
@@ -29,68 +29,101 @@ namespace dawn_native {
using ToggleEnumAndInfoList =
std::array<ToggleEnumAndInfo, static_cast<size_t>(Toggle::EnumCount)>;
- static constexpr ToggleEnumAndInfoList kToggleNameAndInfoList = {
- {{Toggle::EmulateStoreAndMSAAResolve,
- {"emulate_store_and_msaa_resolve",
- "Emulate storing into multisampled color attachments and doing MSAA resolve "
- "simultaneously. This workaround is enabled by default on the Metal drivers that do "
- "not support MTLStoreActionStoreAndMultisampleResolve. To support StoreOp::Store on "
- "those platforms, we should do MSAA resolve in another render pass after ending the "
- "previous one.",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=56"}},
- {Toggle::NonzeroClearResourcesOnCreationForTesting,
- {"nonzero_clear_resources_on_creation_for_testing",
- "Clears texture to full 1 bits as soon as they are created, but doesn't update "
- "the tracking state of the texture. This way we can test the logic of clearing "
- "textures that use recycled memory.",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=145"}},
- {Toggle::AlwaysResolveIntoZeroLevelAndLayer,
- {"always_resolve_into_zero_level_and_layer",
- "When the resolve target is a texture view that is created on the non-zero level or "
- "layer of a texture, we first resolve into a temporarily 2D texture with only one "
- "mipmap level and one array layer, and copy the result of MSAA resolve into the "
- "true resolve target. This workaround is enabled by default on the Metal drivers "
- "that have bugs when setting non-zero resolveLevel or resolveSlice.",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=56"}},
- {Toggle::LazyClearResourceOnFirstUse,
- {"lazy_clear_resource_on_first_use",
- "Clears resource to zero on first usage. This initializes the resource "
- "so that no dirty bits from recycled memory is present in the new resource.",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=145"}},
- {Toggle::TurnOffVsync,
- {"turn_off_vsync",
- "Turn off vsync when rendering. In order to do performance test or run perf tests, "
- "turn off vsync so that the fps can exeed 60.",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=237"}},
- {Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy,
- {"use_temporary_buffer_in_texture_to_texture_copy",
- "Split texture-to-texture copy into two copies: copy from source texture into a "
- "temporary buffer, and copy from the temporary buffer into the destination texture "
- "when copying between compressed textures that don't have block-aligned sizes. This "
- "workaround is enabled by default on all Vulkan drivers to solve an issue in the "
- "Vulkan SPEC about the texture-to-texture copies with compressed formats. See #1005 "
- "(https://github.com/KhronosGroup/Vulkan-Docs/issues/1005) for more details.",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=42"}},
- {Toggle::UseD3D12ResourceHeapTier2,
- {"use_d3d12_resource_heap_tier2",
- "Enable support for resource heap tier 2. Resource heap tier 2 allows mixing of "
- "texture and buffers in the same heap. This allows better heap re-use and reduces "
- "fragmentation."}},
- {Toggle::UseD3D12RenderPass,
- {"use_d3d12_render_pass",
- "Use the D3D12 render pass API introduced in Windows build 1809 by default. On "
- "versions of Windows prior to build 1809, or when this toggle is turned off, Dawn "
- "will emulate a render pass."}},
- {Toggle::SkipValidation,
- {"skip_validation", "Skip expensive validation of Dawn commands."}},
- {Toggle::UseSpvc,
- {"use_spvc",
- "Enable use of spvc for shader compilation, instead of accessing spirv_cross "
- "directly."}},
- {Toggle::UseSpvcIRGen,
- {"use_spvc_ir_gen",
- "Enable usage of spvc's internal parsing and IR generation code, instead of "
- "spirv_cross's."}}}};
+ static constexpr ToggleEnumAndInfoList kToggleNameAndInfoList = {{
+ {Toggle::EmulateStoreAndMSAAResolve,
+ {"emulate_store_and_msaa_resolve",
+ "Emulate storing into multisampled color attachments and doing MSAA resolve "
+ "simultaneously. This workaround is enabled by default on the Metal drivers that do "
+ "not support MTLStoreActionStoreAndMultisampleResolve. To support StoreOp::Store on "
+ "those platforms, we should do MSAA resolve in another render pass after ending the "
+ "previous one.",
+ "https://crbug.com/dawn/56"}},
+ {Toggle::NonzeroClearResourcesOnCreationForTesting,
+ {"nonzero_clear_resources_on_creation_for_testing",
+ "Clears texture to full 1 bits as soon as they are created, but doesn't update "
+ "the tracking state of the texture. This way we can test the logic of clearing "
+ "textures that use recycled memory.",
+ "https://crbug.com/dawn/145"}},
+ {Toggle::AlwaysResolveIntoZeroLevelAndLayer,
+ {"always_resolve_into_zero_level_and_layer",
+ "When the resolve target is a texture view that is created on the non-zero level or "
+            "layer of a texture, we first resolve into a temporary 2D texture with only one "
+ "mipmap level and one array layer, and copy the result of MSAA resolve into the "
+ "true resolve target. This workaround is enabled by default on the Metal drivers "
+ "that have bugs when setting non-zero resolveLevel or resolveSlice.",
+ "https://crbug.com/dawn/56"}},
+ {Toggle::LazyClearResourceOnFirstUse,
+ {"lazy_clear_resource_on_first_use",
+ "Clears resource to zero on first usage. This initializes the resource "
+ "so that no dirty bits from recycled memory is present in the new resource.",
+ "https://crbug.com/dawn/145"}},
+ {Toggle::TurnOffVsync,
+ {"turn_off_vsync",
+ "Turn off vsync when rendering. In order to do performance test or run perf tests, "
+            "turn off vsync so that the fps can exceed 60.",
+ "https://crbug.com/dawn/237"}},
+ {Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy,
+ {"use_temporary_buffer_in_texture_to_texture_copy",
+ "Split texture-to-texture copy into two copies: copy from source texture into a "
+ "temporary buffer, and copy from the temporary buffer into the destination texture "
+ "when copying between compressed textures that don't have block-aligned sizes. This "
+ "workaround is enabled by default on all Vulkan drivers to solve an issue in the "
+ "Vulkan SPEC about the texture-to-texture copies with compressed formats. See #1005 "
+ "(https://github.com/KhronosGroup/Vulkan-Docs/issues/1005) for more details.",
+ "https://crbug.com/dawn/42"}},
+ {Toggle::UseD3D12ResourceHeapTier2,
+ {"use_d3d12_resource_heap_tier2",
+ "Enable support for resource heap tier 2. Resource heap tier 2 allows mixing of "
+ "texture and buffers in the same heap. This allows better heap re-use and reduces "
+ "fragmentation.",
+ "https://crbug.com/dawn/27"}},
+ {Toggle::UseD3D12RenderPass,
+ {"use_d3d12_render_pass",
+ "Use the D3D12 render pass API introduced in Windows build 1809 by default. On "
+ "versions of Windows prior to build 1809, or when this toggle is turned off, Dawn "
+ "will emulate a render pass.",
+ "https://crbug.com/dawn/36"}},
+ {Toggle::UseD3D12ResidencyManagement,
+ {"use_d3d12_residency_management",
+ "Enable residency management. This allows page-in and page-out of resource heaps in "
+ "GPU memory. This component improves overcommitted performance by keeping the most "
+ "recently used resources local to the GPU. Turning this component off can cause "
+ "allocation failures when application memory exceeds physical device memory."}},
+ {Toggle::SkipValidation,
+ {"skip_validation", "Skip expensive validation of Dawn commands.",
+ "https://crbug.com/dawn/271"}},
+ {Toggle::UseSpvc,
+ {"use_spvc",
+ "Enable use of spvc for shader compilation, instead of accessing spirv_cross "
+ "directly.",
+ "https://crbug.com/dawn/288"}},
+ {Toggle::UseSpvcParser,
+ {"use_spvc_parser",
+ "Enable usage of spvc's internal parsing and IR generation code, instead of "
+ "spirv_cross's.",
+ "https://crbug.com/dawn/288"}},
+ {Toggle::VulkanUseD32S8,
+ {"vulkan_use_d32s8",
+ "Vulkan mandates support of either D32_FLOAT_S8 or D24_UNORM_S8. When available the "
+            "backend will use D32S8 (toggle to on) but setting the toggle to off will make it "
+ "use the D24S8 format when possible.",
+ "https://crbug.com/dawn/286"}},
+ {Toggle::MetalDisableSamplerCompare,
+ {"metal_disable_sampler_compare",
+ "Disables the use of sampler compare on Metal. This is unsupported before A9 "
+ "processors."}},
+ {Toggle::DisableBaseVertex,
+ {"disable_base_vertex",
+ "Disables the use of non-zero base vertex which is unsupported on some platforms."}},
+ {Toggle::DisableBaseInstance,
+ {"disable_base_instance",
+ "Disables the use of non-zero base instance which is unsupported on some "
+ "platforms."}},
+ {Toggle::UseD3D12SmallShaderVisibleHeapForTesting,
+ {"use_d3d12_small_shader_visible_heap",
+ "Enable use of a small D3D12 shader visible heap, instead of using a large one by "
+ "default. This setting is used to test bindgroup encoding."}},
+ }};
} // anonymous namespace
diff --git a/chromium/third_party/dawn/src/dawn_native/Toggles.h b/chromium/third_party/dawn/src/dawn_native/Toggles.h
index aa5c4f9bcd9..3cc40ecb28a 100644
--- a/chromium/third_party/dawn/src/dawn_native/Toggles.h
+++ b/chromium/third_party/dawn/src/dawn_native/Toggles.h
@@ -32,9 +32,15 @@ namespace dawn_native {
UseTemporaryBufferInCompressedTextureToTextureCopy,
UseD3D12ResourceHeapTier2,
UseD3D12RenderPass,
+ UseD3D12ResidencyManagement,
SkipValidation,
UseSpvc,
- UseSpvcIRGen,
+ UseSpvcParser,
+ VulkanUseD32S8,
+ MetalDisableSamplerCompare,
+ DisableBaseVertex,
+ DisableBaseInstance,
+ UseD3D12SmallShaderVisibleHeapForTesting,
EnumCount,
InvalidEnum = EnumCount,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
index c39791a8767..9f64134423a 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
@@ -34,8 +34,8 @@ namespace dawn_native { namespace d3d12 {
}
};
- Adapter::Adapter(Backend* backend, ComPtr<IDXGIAdapter1> hardwareAdapter)
- : AdapterBase(backend->GetInstance(), BackendType::D3D12),
+ Adapter::Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter)
+ : AdapterBase(backend->GetInstance(), wgpu::BackendType::D3D12),
mHardwareAdapter(hardwareAdapter),
mBackend(backend) {
}
@@ -44,7 +44,7 @@ namespace dawn_native { namespace d3d12 {
return mDeviceInfo;
}
- IDXGIAdapter1* Adapter::GetHardwareAdapter() const {
+ IDXGIAdapter3* Adapter::GetHardwareAdapter() const {
return mHardwareAdapter.Get();
}
@@ -63,21 +63,22 @@ namespace dawn_native { namespace d3d12 {
const PlatformFunctions* functions = GetBackend()->GetFunctions();
if (FAILED(functions->d3d12CreateDevice(GetHardwareAdapter(), D3D_FEATURE_LEVEL_11_0,
_uuidof(ID3D12Device), &mD3d12Device))) {
- return DAWN_DEVICE_LOST_ERROR("D3D12CreateDevice failed");
+ return DAWN_INTERNAL_ERROR("D3D12CreateDevice failed");
}
- DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
-
DXGI_ADAPTER_DESC1 adapterDesc;
mHardwareAdapter->GetDesc1(&adapterDesc);
mPCIInfo.deviceId = adapterDesc.DeviceId;
mPCIInfo.vendorId = adapterDesc.VendorId;
+ DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
+
if (adapterDesc.Flags & DXGI_ADAPTER_FLAG_SOFTWARE) {
- mDeviceType = DeviceType::CPU;
+ mAdapterType = wgpu::AdapterType::CPU;
} else {
- mDeviceType = (mDeviceInfo.isUMA) ? DeviceType::IntegratedGPU : DeviceType::DiscreteGPU;
+ mAdapterType = (mDeviceInfo.isUMA) ? wgpu::AdapterType::IntegratedGPU
+ : wgpu::AdapterType::DiscreteGPU;
}
std::wstring_convert<DeletableFacet<std::codecvt<wchar_t, char, std::mbstate_t>>> converter(
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h
index 6c085f03ddd..6c2d4f149fb 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.h
@@ -26,11 +26,11 @@ namespace dawn_native { namespace d3d12 {
class Adapter : public AdapterBase {
public:
- Adapter(Backend* backend, ComPtr<IDXGIAdapter1> hardwareAdapter);
+ Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter);
virtual ~Adapter() = default;
const D3D12DeviceInfo& GetDeviceInfo() const;
- IDXGIAdapter1* GetHardwareAdapter() const;
+ IDXGIAdapter3* GetHardwareAdapter() const;
Backend* GetBackend() const;
ComPtr<ID3D12Device> GetDevice() const;
@@ -40,7 +40,7 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<DeviceBase*> CreateDeviceImpl(const DeviceDescriptor* descriptor) override;
void InitializeSupportedExtensions();
- ComPtr<IDXGIAdapter1> mHardwareAdapter;
+ ComPtr<IDXGIAdapter3> mHardwareAdapter;
ComPtr<ID3D12Device> mD3d12Device;
Backend* mBackend;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
index 71beb6c5be6..b7a3f82cb48 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
@@ -61,7 +61,7 @@ namespace dawn_native { namespace d3d12 {
}
if (FAILED(functions->createDxgiFactory2(dxgiFactoryFlags, IID_PPV_ARGS(&factory)))) {
- return DAWN_DEVICE_LOST_ERROR("Failed to create a DXGI factory");
+ return DAWN_INTERNAL_ERROR("Failed to create a DXGI factory");
}
ASSERT(factory != nullptr);
@@ -70,7 +70,8 @@ namespace dawn_native { namespace d3d12 {
} // anonymous namespace
- Backend::Backend(InstanceBase* instance) : BackendConnection(instance, BackendType::D3D12) {
+ Backend::Backend(InstanceBase* instance)
+ : BackendConnection(instance, wgpu::BackendType::D3D12) {
}
MaybeError Backend::Initialize() {
@@ -105,7 +106,12 @@ namespace dawn_native { namespace d3d12 {
ASSERT(dxgiAdapter != nullptr);
- std::unique_ptr<Adapter> adapter = std::make_unique<Adapter>(this, dxgiAdapter);
+ ComPtr<IDXGIAdapter3> dxgiAdapter3;
+ HRESULT result = dxgiAdapter.As(&dxgiAdapter3);
+ ASSERT(SUCCEEDED(result));
+
+ std::unique_ptr<Adapter> adapter =
+ std::make_unique<Adapter>(this, std::move(dxgiAdapter3));
if (GetInstance()->ConsumedError(adapter->Initialize())) {
continue;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
index 38ca3a4c975..3abae0ea629 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
@@ -13,55 +13,103 @@
// limitations under the License.
#include "dawn_native/d3d12/BindGroupD3D12.h"
+
#include "common/BitSetIterator.h"
#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
#include "dawn_native/d3d12/BufferD3D12.h"
+#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/SamplerD3D12.h"
+#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
#include "dawn_native/d3d12/TextureD3D12.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-
namespace dawn_native { namespace d3d12 {
+ // static
+ BindGroup* BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
+ return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+ }
+
BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
- : BindGroupBase(device, descriptor) {
+ : BindGroupBase(this, device, descriptor) {
+ }
+
+ BindGroup::~BindGroup() {
+ ToBackend(GetLayout())->DeallocateBindGroup(this);
}
- void BindGroup::AllocateDescriptors(const DescriptorHeapHandle& cbvUavSrvHeapStart,
- uint32_t* cbvUavSrvHeapOffset,
- const DescriptorHeapHandle& samplerHeapStart,
- uint32_t* samplerHeapOffset) {
- const auto* bgl = ToBackend(GetLayout());
- const auto& layout = bgl->GetBindingInfo();
+ ResultOrError<bool> BindGroup::Populate(ShaderVisibleDescriptorAllocator* allocator) {
+ Device* device = ToBackend(GetDevice());
+
+ if (allocator->IsAllocationStillValid(mLastUsageSerial, mHeapSerial)) {
+ return true;
+ }
+
+ // Attempt to allocate descriptors for the currently bound shader-visible heaps.
+ // If either failed, return early to re-allocate and switch the heaps.
+ const BindGroupLayout* bgl = ToBackend(GetLayout());
+ const Serial pendingSerial = device->GetPendingCommandSerial();
+
+ const uint32_t cbvUavSrvDescriptorCount = bgl->GetCbvUavSrvDescriptorCount();
+ DescriptorHeapAllocation cbvSrvUavDescriptorHeapAllocation;
+ if (cbvUavSrvDescriptorCount > 0) {
+ DAWN_TRY_ASSIGN(
+ cbvSrvUavDescriptorHeapAllocation,
+ allocator->AllocateGPUDescriptors(cbvUavSrvDescriptorCount, pendingSerial,
+ D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV));
+ if (cbvSrvUavDescriptorHeapAllocation.IsInvalid()) {
+ return false;
+ }
+
+ mBaseCbvSrvUavDescriptor = cbvSrvUavDescriptorHeapAllocation.GetGPUHandle(0);
+ }
+
+ const uint32_t samplerDescriptorCount = bgl->GetSamplerDescriptorCount();
+ DescriptorHeapAllocation samplerDescriptorHeapAllocation;
+ if (samplerDescriptorCount > 0) {
+ DAWN_TRY_ASSIGN(samplerDescriptorHeapAllocation,
+ allocator->AllocateGPUDescriptors(samplerDescriptorCount, pendingSerial,
+ D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER));
+ if (samplerDescriptorHeapAllocation.IsInvalid()) {
+ return false;
+ }
+
+ mBaseSamplerDescriptor = samplerDescriptorHeapAllocation.GetGPUHandle(0);
+ }
- // Save the offset to the start of the descriptor table in the heap
- mCbvUavSrvHeapOffset = *cbvUavSrvHeapOffset;
- mSamplerHeapOffset = *samplerHeapOffset;
+ // Record both the device and heap serials to determine later if the allocations are still
+ // valid.
+ mLastUsageSerial = pendingSerial;
+ mHeapSerial = allocator->GetShaderVisibleHeapsSerial();
const auto& bindingOffsets = bgl->GetBindingOffsets();
- auto d3d12Device = ToBackend(GetDevice())->GetD3D12Device();
- for (uint32_t bindingIndex : IterateBitSet(layout.mask)) {
- // It's not necessary to create descriptors in descriptor heap for dynamic resources.
- // So skip allocating descriptors in descriptor heaps for dynamic buffers.
- if (layout.hasDynamicOffset[bindingIndex]) {
+ ID3D12Device* d3d12Device = device->GetD3D12Device().Get();
+
+ for (BindingIndex bindingIndex = 0; bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
+
+ // It's not necessary to create descriptors in descriptor heap for dynamic
+ // resources. So skip allocating descriptors in descriptor heaps for dynamic
+ // buffers.
+ if (bindingInfo.hasDynamicOffset) {
continue;
}
- switch (layout.types[bindingIndex]) {
+ switch (bindingInfo.type) {
case wgpu::BindingType::UniformBuffer: {
BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
D3D12_CONSTANT_BUFFER_VIEW_DESC desc;
- // TODO(enga@google.com): investigate if this needs to be a constraint at the
- // API level
+ // TODO(enga@google.com): investigate if this needs to be a constraint at
+ // the API level
desc.SizeInBytes = Align(binding.size, 256);
desc.BufferLocation = ToBackend(binding.buffer)->GetVA() + binding.offset;
d3d12Device->CreateConstantBufferView(
- &desc, cbvUavSrvHeapStart.GetCPUHandle(*cbvUavSrvHeapOffset +
- bindingOffsets[bindingIndex]));
- } break;
+ &desc, cbvSrvUavDescriptorHeapAllocation.GetCPUHandle(
+ bindingOffsets[bindingIndex]));
+ break;
+ }
case wgpu::BindingType::StorageBuffer: {
BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
@@ -83,27 +131,52 @@ namespace dawn_native { namespace d3d12 {
d3d12Device->CreateUnorderedAccessView(
ToBackend(binding.buffer)->GetD3D12Resource().Get(), nullptr, &desc,
- cbvUavSrvHeapStart.GetCPUHandle(*cbvUavSrvHeapOffset +
- bindingOffsets[bindingIndex]));
- } break;
+ cbvSrvUavDescriptorHeapAllocation.GetCPUHandle(
+ bindingOffsets[bindingIndex]));
+ break;
+ }
+ case wgpu::BindingType::ReadonlyStorageBuffer: {
+ BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
+
+ // Like StorageBuffer, SPIRV-Cross outputs HLSL shaders for readonly storage
+ // buffer with ByteAddressBuffer. So we must use D3D12_BUFFER_SRV_FLAG_RAW
+ // when making the SRV descriptor. And it has similar requirement for
+ // format, element size, etc.
+ D3D12_SHADER_RESOURCE_VIEW_DESC desc;
+ desc.Format = DXGI_FORMAT_R32_TYPELESS;
+ desc.ViewDimension = D3D12_SRV_DIMENSION_BUFFER;
+ desc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
+ desc.Buffer.FirstElement = binding.offset / 4;
+ desc.Buffer.NumElements = binding.size / 4;
+ desc.Buffer.StructureByteStride = 0;
+ desc.Buffer.Flags = D3D12_BUFFER_SRV_FLAG_RAW;
+ d3d12Device->CreateShaderResourceView(
+ ToBackend(binding.buffer)->GetD3D12Resource().Get(), &desc,
+ cbvSrvUavDescriptorHeapAllocation.GetCPUHandle(
+ bindingOffsets[bindingIndex]));
+ break;
+ }
case wgpu::BindingType::SampledTexture: {
auto* view = ToBackend(GetBindingAsTextureView(bindingIndex));
auto& srv = view->GetSRVDescriptor();
d3d12Device->CreateShaderResourceView(
ToBackend(view->GetTexture())->GetD3D12Resource(), &srv,
- cbvUavSrvHeapStart.GetCPUHandle(*cbvUavSrvHeapOffset +
- bindingOffsets[bindingIndex]));
- } break;
+ cbvSrvUavDescriptorHeapAllocation.GetCPUHandle(
+ bindingOffsets[bindingIndex]));
+ break;
+ }
case wgpu::BindingType::Sampler: {
auto* sampler = ToBackend(GetBindingAsSampler(bindingIndex));
auto& samplerDesc = sampler->GetSamplerDescriptor();
d3d12Device->CreateSampler(
- &samplerDesc, samplerHeapStart.GetCPUHandle(*samplerHeapOffset +
- bindingOffsets[bindingIndex]));
- } break;
+ &samplerDesc,
+ samplerDescriptorHeapAllocation.GetCPUHandle(bindingOffsets[bindingIndex]));
+ break;
+ }
case wgpu::BindingType::StorageTexture:
- case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
UNREACHABLE();
break;
@@ -111,24 +184,14 @@ namespace dawn_native { namespace d3d12 {
}
}
- // Offset by the number of descriptors created
- *cbvUavSrvHeapOffset += bgl->GetCbvUavSrvDescriptorCount();
- *samplerHeapOffset += bgl->GetSamplerDescriptorCount();
- }
-
- uint32_t BindGroup::GetCbvUavSrvHeapOffset() const {
- return mCbvUavSrvHeapOffset;
+ return true;
}
- uint32_t BindGroup::GetSamplerHeapOffset() const {
- return mSamplerHeapOffset;
+ D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseCbvUavSrvDescriptor() const {
+ return mBaseCbvSrvUavDescriptor;
}
- bool BindGroup::TestAndSetCounted(uint64_t heapSerial, uint32_t indexInSubmit) {
- bool isCounted = (mHeapSerial == heapSerial && mIndexInSubmit == indexInSubmit);
- mHeapSerial = heapSerial;
- mIndexInSubmit = indexInSubmit;
- return isCounted;
+ D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseSamplerDescriptor() const {
+ return mBaseSamplerDescriptor;
}
-
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h
index 458e9926f7d..dc5fb756658 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.h
@@ -15,37 +15,36 @@
#ifndef DAWNNATIVE_D3D12_BINDGROUPD3D12_H_
#define DAWNNATIVE_D3D12_BINDGROUPD3D12_H_
+#include "common/PlacementAllocated.h"
+#include "common/Serial.h"
#include "dawn_native/BindGroup.h"
-
#include "dawn_native/d3d12/d3d12_platform.h"
-#include "dawn_native/d3d12/DescriptorHeapAllocator.h"
-
namespace dawn_native { namespace d3d12 {
class Device;
+ class ShaderVisibleDescriptorAllocator;
- class BindGroup : public BindGroupBase {
+ class BindGroup : public BindGroupBase, public PlacementAllocated {
public:
+ static BindGroup* Create(Device* device, const BindGroupDescriptor* descriptor);
+
BindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ ~BindGroup() override;
- void AllocateDescriptors(const DescriptorHeapHandle& cbvSrvUavHeapStart,
- uint32_t* cbvUavSrvHeapOffset,
- const DescriptorHeapHandle& samplerHeapStart,
- uint32_t* samplerHeapOffset);
- uint32_t GetCbvUavSrvHeapOffset() const;
- uint32_t GetSamplerHeapOffset() const;
+ // Returns true if the BindGroup was successfully populated.
+ ResultOrError<bool> Populate(ShaderVisibleDescriptorAllocator* allocator);
- bool TestAndSetCounted(uint64_t heapSerial, uint32_t indexInSubmit);
+ D3D12_GPU_DESCRIPTOR_HANDLE GetBaseCbvUavSrvDescriptor() const;
+ D3D12_GPU_DESCRIPTOR_HANDLE GetBaseSamplerDescriptor() const;
private:
- uint32_t mCbvUavSrvHeapOffset;
- uint32_t mSamplerHeapOffset;
+ Serial mLastUsageSerial = 0;
+ Serial mHeapSerial = 0;
- uint64_t mHeapSerial = 0;
- uint32_t mIndexInSubmit = 0;
+ D3D12_GPU_DESCRIPTOR_HANDLE mBaseCbvSrvUavDescriptor = {0};
+ D3D12_GPU_DESCRIPTOR_HANDLE mBaseSamplerDescriptor = {0};
};
-
}} // namespace dawn_native::d3d12
#endif // DAWNNATIVE_D3D12_BINDGROUPD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
index b8107108425..0def96c8ad5 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
@@ -15,42 +15,49 @@
#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
#include "common/BitSetIterator.h"
+#include "dawn_native/d3d12/BindGroupD3D12.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
namespace dawn_native { namespace d3d12 {
-
- BindGroupLayout::BindGroupLayout(Device* device, const BindGroupLayoutDescriptor* descriptor)
- : BindGroupLayoutBase(device, descriptor), mDescriptorCounts{} {
- const auto& groupInfo = GetBindingInfo();
-
- for (uint32_t binding : IterateBitSet(groupInfo.mask)) {
- // For dynamic resources, Dawn uses root descriptor in D3D12 backend.
- // So there is no need to allocate the descriptor from descriptor heap. Skip counting
- // dynamic resources for calculating size of descriptor heap.
- if (groupInfo.hasDynamicOffset[binding]) {
- continue;
- }
-
- switch (groupInfo.types[binding]) {
+ namespace {
+ BindGroupLayout::DescriptorType WGPUBindingTypeToDescriptorType(
+ wgpu::BindingType bindingType) {
+ switch (bindingType) {
case wgpu::BindingType::UniformBuffer:
- mBindingOffsets[binding] = mDescriptorCounts[CBV]++;
- break;
+ return BindGroupLayout::DescriptorType::CBV;
case wgpu::BindingType::StorageBuffer:
- mBindingOffsets[binding] = mDescriptorCounts[UAV]++;
- break;
+ case wgpu::BindingType::WriteonlyStorageTexture:
+ return BindGroupLayout::DescriptorType::UAV;
case wgpu::BindingType::SampledTexture:
- mBindingOffsets[binding] = mDescriptorCounts[SRV]++;
- break;
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ return BindGroupLayout::DescriptorType::SRV;
case wgpu::BindingType::Sampler:
- mBindingOffsets[binding] = mDescriptorCounts[Sampler]++;
- break;
-
+ return BindGroupLayout::DescriptorType::Sampler;
case wgpu::BindingType::StorageTexture:
- case wgpu::BindingType::ReadonlyStorageBuffer:
UNREACHABLE();
- break;
+ return BindGroupLayout::DescriptorType::UAV;
}
}
+ } // anonymous namespace
+
+ BindGroupLayout::BindGroupLayout(Device* device, const BindGroupLayoutDescriptor* descriptor)
+ : BindGroupLayoutBase(device, descriptor),
+ mDescriptorCounts{},
+ mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
+ for (BindingIndex bindingIndex = GetDynamicBufferCount(); bindingIndex < GetBindingCount();
+ ++bindingIndex) {
+ const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+
+ // For dynamic resources, Dawn uses root descriptor in D3D12 backend.
+ // So there is no need to allocate the descriptor from descriptor heap.
+ // This loop starts after the dynamic buffer indices to skip counting
+ // dynamic resources in calculating the size of the descriptor heap.
+ ASSERT(!bindingInfo.hasDynamicOffset);
+
+ DescriptorType descriptorType = WGPUBindingTypeToDescriptorType(bindingInfo.type);
+ mBindingOffsets[bindingIndex] = mDescriptorCounts[descriptorType]++;
+ }
auto SetDescriptorRange = [&](uint32_t index, uint32_t count, uint32_t* baseRegister,
D3D12_DESCRIPTOR_RANGE_TYPE type) -> bool {
@@ -93,48 +100,43 @@ namespace dawn_native { namespace d3d12 {
D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER);
descriptorOffsets[Sampler] = 0;
- for (uint32_t binding : IterateBitSet(groupInfo.mask)) {
- if (groupInfo.hasDynamicOffset[binding]) {
+ for (BindingIndex bindingIndex = 0; bindingIndex < GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+
+ if (bindingInfo.hasDynamicOffset) {
// Dawn is using values in mBindingOffsets to decide register number in HLSL.
// Root descriptor needs to set this value to set correct register number in
// generated HLSL shader.
- switch (groupInfo.types[binding]) {
+ switch (bindingInfo.type) {
case wgpu::BindingType::UniformBuffer:
case wgpu::BindingType::StorageBuffer:
- mBindingOffsets[binding] = baseRegister++;
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ mBindingOffsets[bindingIndex] = baseRegister++;
break;
case wgpu::BindingType::SampledTexture:
case wgpu::BindingType::Sampler:
case wgpu::BindingType::StorageTexture:
- case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
UNREACHABLE();
break;
}
continue;
}
- switch (groupInfo.types[binding]) {
- case wgpu::BindingType::UniformBuffer:
- mBindingOffsets[binding] += descriptorOffsets[CBV];
- break;
- case wgpu::BindingType::StorageBuffer:
- mBindingOffsets[binding] += descriptorOffsets[UAV];
- break;
- case wgpu::BindingType::SampledTexture:
- mBindingOffsets[binding] += descriptorOffsets[SRV];
- break;
- case wgpu::BindingType::Sampler:
- mBindingOffsets[binding] += descriptorOffsets[Sampler];
- break;
+ // TODO(shaobo.yan@intel.com): Implement dynamic buffer offset.
+ DescriptorType descriptorType = WGPUBindingTypeToDescriptorType(bindingInfo.type);
+ mBindingOffsets[bindingIndex] += descriptorOffsets[descriptorType];
+ }
+ }
- case wgpu::BindingType::StorageTexture:
- case wgpu::BindingType::ReadonlyStorageBuffer:
- UNREACHABLE();
- break;
+ BindGroup* BindGroupLayout::AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor) {
+ return mBindGroupAllocator.Allocate(device, descriptor);
+ }
- // TODO(shaobo.yan@intel.com): Implement dynamic buffer offset.
- }
- }
+ void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
+ mBindGroupAllocator.Deallocate(bindGroup);
}
const std::array<uint32_t, kMaxBindingsPerGroup>& BindGroupLayout::GetBindingOffsets() const {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
index e5766e635d2..7d393ec65d4 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
@@ -17,16 +17,21 @@
#include "dawn_native/BindGroupLayout.h"
+#include "common/SlabAllocator.h"
#include "dawn_native/d3d12/d3d12_platform.h"
namespace dawn_native { namespace d3d12 {
+ class BindGroup;
class Device;
class BindGroupLayout : public BindGroupLayoutBase {
public:
BindGroupLayout(Device* device, const BindGroupLayoutDescriptor* descriptor);
+ BindGroup* AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ void DeallocateBindGroup(BindGroup* bindGroup);
+
enum DescriptorType {
CBV,
UAV,
@@ -47,6 +52,8 @@ namespace dawn_native { namespace d3d12 {
std::array<uint32_t, kMaxBindingsPerGroup> mBindingOffsets;
std::array<uint32_t, DescriptorType::Count> mDescriptorCounts;
D3D12_DESCRIPTOR_RANGE mRanges[DescriptorType::Count];
+
+ SlabAllocator<BindGroup> mBindGroupAllocator;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
index b875403bf9a..4fb6f20a697 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
@@ -20,6 +20,8 @@
#include "dawn_native/d3d12/CommandRecordingContext.h"
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
+#include "dawn_native/d3d12/HeapD3D12.h"
+#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -52,6 +54,10 @@ namespace dawn_native { namespace d3d12 {
if (usage & wgpu::BufferUsage::Storage) {
resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
}
+ if (usage & kReadOnlyStorage) {
+ resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
+ D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
+ }
if (usage & wgpu::BufferUsage::Indirect) {
resourceState |= D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT;
}
@@ -78,7 +84,7 @@ namespace dawn_native { namespace d3d12 {
D3D12_RESOURCE_DESC resourceDescriptor;
resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
resourceDescriptor.Alignment = 0;
- resourceDescriptor.Width = GetD3D12Size();
+ resourceDescriptor.Width = GetSize();
resourceDescriptor.Height = 1;
resourceDescriptor.DepthOrArraySize = 1;
resourceDescriptor.MipLevels = 1;
@@ -119,11 +125,6 @@ namespace dawn_native { namespace d3d12 {
DestroyInternal();
}
- uint32_t Buffer::GetD3D12Size() const {
- // TODO(enga@google.com): TODO investigate if this needs to be a constraint at the API level
- return Align(GetSize(), 256);
- }
-
ComPtr<ID3D12Resource> Buffer::GetD3D12Resource() const {
return mResourceAllocation.GetD3D12Resource();
}
@@ -131,6 +132,29 @@ namespace dawn_native { namespace d3d12 {
// When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
// ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
// cause subsequent errors.
+ bool Buffer::TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ wgpu::BufferUsage newUsage) {
+ // Track the underlying heap to ensure residency.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
+
+ // Return the resource barrier.
+ return TransitionUsageAndGetResourceBarrier(commandContext, barrier, newUsage);
+ }
+
+ void Buffer::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::BufferUsage newUsage) {
+ D3D12_RESOURCE_BARRIER barrier;
+
+ if (TrackUsageAndGetResourceBarrier(commandContext, &barrier, newUsage)) {
+ commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
+ }
+ }
+
+ // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
+ // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
+ // cause subsequent errors.
bool Buffer::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
D3D12_RESOURCE_BARRIER* barrier,
wgpu::BufferUsage newUsage) {
@@ -203,15 +227,6 @@ namespace dawn_native { namespace d3d12 {
return true;
}
- void Buffer::TransitionUsageNow(CommandRecordingContext* commandContext,
- wgpu::BufferUsage usage) {
- D3D12_RESOURCE_BARRIER barrier;
-
- if (TransitionUsageAndGetResourceBarrier(commandContext, &barrier, usage)) {
- commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
- }
- }
-
D3D12_GPU_VIRTUAL_ADDRESS Buffer::GetVA() const {
return mResourceAllocation.GetGPUPointer();
}
@@ -230,7 +245,12 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
- mWrittenMappedRange = {0, GetSize()};
+ // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
+ // evicted. This buffer should already have been made resident when it was created.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockMappableHeap(heap));
+
+ mWrittenMappedRange = {0, static_cast<size_t>(GetSize())};
DAWN_TRY(CheckHRESULT(GetD3D12Resource()->Map(0, &mWrittenMappedRange,
reinterpret_cast<void**>(mappedPointer)),
"D3D12 map at creation"));
@@ -238,8 +258,13 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError Buffer::MapReadAsyncImpl(uint32_t serial) {
+ // The mapped buffer can be accessed at any time, so we must make the buffer resident and
+ // lock it to ensure it is never evicted.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockMappableHeap(heap));
+
mWrittenMappedRange = {};
- D3D12_RANGE readRange = {0, GetSize()};
+ D3D12_RANGE readRange = {0, static_cast<size_t>(GetSize())};
char* data = nullptr;
DAWN_TRY(
CheckHRESULT(GetD3D12Resource()->Map(0, &readRange, reinterpret_cast<void**>(&data)),
@@ -252,7 +277,12 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError Buffer::MapWriteAsyncImpl(uint32_t serial) {
- mWrittenMappedRange = {0, GetSize()};
+ // The mapped buffer can be accessed at any time, so we must make the buffer resident and
+ // lock it to ensure it is never evicted.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockMappableHeap(heap));
+
+ mWrittenMappedRange = {0, static_cast<size_t>(GetSize())};
char* data = nullptr;
DAWN_TRY(CheckHRESULT(
GetD3D12Resource()->Map(0, &mWrittenMappedRange, reinterpret_cast<void**>(&data)),
@@ -266,10 +296,21 @@ namespace dawn_native { namespace d3d12 {
void Buffer::UnmapImpl() {
GetD3D12Resource()->Unmap(0, &mWrittenMappedRange);
+ // When buffers are mapped, they are locked to keep them in resident memory. We must unlock
+ // them when they are unmapped.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ ToBackend(GetDevice())->GetResidencyManager()->UnlockMappableHeap(heap);
mWrittenMappedRange = {};
}
void Buffer::DestroyImpl() {
+ // We must ensure that if a mapped buffer is destroyed, it does not leave a dangling lock
+ // reference on its heap.
+ if (IsMapped()) {
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ ToBackend(GetDevice())->GetResidencyManager()->UnlockMappableHeap(heap);
+ }
+
ToBackend(GetDevice())->DeallocateMemory(mResourceAllocation);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
index 6a5b9366a40..aa80d832dd4 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
@@ -33,14 +33,15 @@ namespace dawn_native { namespace d3d12 {
MaybeError Initialize();
- uint32_t GetD3D12Size() const;
ComPtr<ID3D12Resource> GetD3D12Resource() const;
D3D12_GPU_VIRTUAL_ADDRESS GetVA() const;
void OnMapCommandSerialFinished(uint32_t mapSerial, void* data, bool isWrite);
- bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- wgpu::BufferUsage newUsage);
- void TransitionUsageNow(CommandRecordingContext* commandContext, wgpu::BufferUsage usage);
+
+ bool TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ wgpu::BufferUsage newUsage);
+ void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::BufferUsage newUsage);
private:
// Dawn API
@@ -52,6 +53,10 @@ namespace dawn_native { namespace d3d12 {
bool IsMapWritable() const override;
virtual MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
+ bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ wgpu::BufferUsage newUsage);
+
ResourceHeapAllocation mResourceAllocation;
bool mFixedResourceState = false;
wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
index 1b8118ea6a2..104e5b14876 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
@@ -31,6 +31,7 @@
#include "dawn_native/d3d12/RenderPassBuilderD3D12.h"
#include "dawn_native/d3d12/RenderPipelineD3D12.h"
#include "dawn_native/d3d12/SamplerD3D12.h"
+#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
#include "dawn_native/d3d12/TextureCopySplitter.h"
#include "dawn_native/d3d12/TextureD3D12.h"
#include "dawn_native/d3d12/UtilsD3D12.h"
@@ -52,18 +53,40 @@ namespace dawn_native { namespace d3d12 {
}
}
- bool CanUseCopyResource(const uint32_t sourceNumMipLevels,
- const Extent3D& srcSize,
- const Extent3D& dstSize,
- const Extent3D& copySize) {
- if (sourceNumMipLevels == 1 && srcSize.width == dstSize.width &&
- srcSize.height == dstSize.height && srcSize.depth == dstSize.depth &&
- srcSize.width == copySize.width && srcSize.height == copySize.height &&
- srcSize.depth == copySize.depth) {
- return true;
- }
-
- return false;
+ bool CanUseCopyResource(const Texture* src, const Texture* dst, const Extent3D& copySize) {
+ // Checked by validation
+ ASSERT(src->GetSampleCount() == dst->GetSampleCount());
+ ASSERT(src->GetFormat().format == dst->GetFormat().format);
+
+ const Extent3D& srcSize = src->GetSize();
+ const Extent3D& dstSize = dst->GetSize();
+
+ auto GetCopyDepth = [](const Texture* texture) {
+ switch (texture->GetDimension()) {
+ case wgpu::TextureDimension::e1D:
+ return 1u;
+ case wgpu::TextureDimension::e2D:
+ return texture->GetArrayLayers();
+ case wgpu::TextureDimension::e3D:
+ return texture->GetSize().depth;
+ }
+ };
+
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12graphicscommandlist-copyresource
+ // In order to use D3D12's copy resource, the textures must be the same dimensions, and
+ // the copy must be of the entire resource.
+ // TODO(dawn:129): Support 1D textures.
+ return src->GetDimension() == dst->GetDimension() && //
+ dst->GetNumMipLevels() == 1 && //
+ src->GetNumMipLevels() == 1 && // A copy command is of a single mip, so if a
+ // resource has more than one, we definitely
+ // cannot use CopyResource.
+ copySize.width == dstSize.width && //
+ copySize.width == srcSize.width && //
+ copySize.height == dstSize.height && //
+ copySize.height == srcSize.height && //
+ copySize.depth == GetCopyDepth(src) && //
+ copySize.depth == GetCopyDepth(dst);
}
} // anonymous namespace
@@ -71,74 +94,56 @@ namespace dawn_native { namespace d3d12 {
class BindGroupStateTracker : public BindGroupAndStorageBarrierTrackerBase<false, uint64_t> {
public:
BindGroupStateTracker(Device* device)
- : BindGroupAndStorageBarrierTrackerBase(), mDevice(device) {
+ : BindGroupAndStorageBarrierTrackerBase(),
+ mAllocator(device->GetShaderVisibleDescriptorAllocator()) {
}
void SetInComputePass(bool inCompute_) {
mInCompute = inCompute_;
}
- MaybeError AllocateDescriptorHeaps(Device* device) {
- // This function should only be called once.
- ASSERT(mCbvSrvUavGPUDescriptorHeap.Get() == nullptr &&
- mSamplerGPUDescriptorHeap.Get() == nullptr);
-
- DescriptorHeapAllocator* descriptorHeapAllocator = device->GetDescriptorHeapAllocator();
-
- if (mCbvSrvUavDescriptorHeapSize > 0) {
- DAWN_TRY_ASSIGN(
- mCbvSrvUavGPUDescriptorHeap,
- descriptorHeapAllocator->AllocateGPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV,
- mCbvSrvUavDescriptorHeapSize));
- }
-
- if (mSamplerDescriptorHeapSize > 0) {
- DAWN_TRY_ASSIGN(mSamplerGPUDescriptorHeap, descriptorHeapAllocator->AllocateGPUHeap(
- D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER,
- mSamplerDescriptorHeapSize));
- }
-
- uint32_t cbvSrvUavDescriptorIndex = 0;
- uint32_t samplerDescriptorIndex = 0;
- for (BindGroup* group : mBindGroupsToAllocate) {
- ASSERT(group);
- ASSERT(cbvSrvUavDescriptorIndex +
- ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount() <=
- mCbvSrvUavDescriptorHeapSize);
- ASSERT(samplerDescriptorIndex +
- ToBackend(group->GetLayout())->GetSamplerDescriptorCount() <=
- mSamplerDescriptorHeapSize);
- group->AllocateDescriptors(mCbvSrvUavGPUDescriptorHeap, &cbvSrvUavDescriptorIndex,
- mSamplerGPUDescriptorHeap, &samplerDescriptorIndex);
+ MaybeError Apply(CommandRecordingContext* commandContext) {
+ // Bindgroups are allocated in shader-visible descriptor heaps which are managed by a
+ // ringbuffer. There can be a single shader-visible descriptor heap of each type bound
+ // at any given time. This means that when we switch heaps, all other currently bound
+ // bindgroups must be re-populated. Bindgroups can fail allocation gracefully which is
+ // the signal to change the bounded heaps.
+ // Re-populating all bindgroups after the last one fails causes duplicated allocations
+ // to occur on overflow.
+ // TODO(bryan.bernhart@intel.com): Consider further optimization.
+ bool didCreateBindGroups = true;
+ for (uint32_t index : IterateBitSet(mDirtyBindGroups)) {
+ DAWN_TRY_ASSIGN(didCreateBindGroups,
+ ToBackend(mBindGroups[index])->Populate(mAllocator));
+ if (!didCreateBindGroups) {
+ break;
+ }
}
- ASSERT(cbvSrvUavDescriptorIndex == mCbvSrvUavDescriptorHeapSize);
- ASSERT(samplerDescriptorIndex == mSamplerDescriptorHeapSize);
+ // This will re-create bindgroups for both heaps even if only one overflowed.
+ // TODO(bryan.bernhart@intel.com): Consider re-allocating heaps independently
+ // such that overflowing one doesn't re-allocate the another.
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+ if (!didCreateBindGroups) {
+ DAWN_TRY(mAllocator->AllocateAndSwitchShaderVisibleHeaps());
- return {};
- }
+ mDirtyBindGroupsObjectChangedOrIsDynamic |= mBindGroupLayoutsMask;
+ mDirtyBindGroups |= mBindGroupLayoutsMask;
- // This function must only be called before calling AllocateDescriptorHeaps().
- void TrackSetBindGroup(BindGroup* group, uint32_t index, uint32_t indexInSubmit) {
- if (mBindGroups[index] != group) {
- mBindGroups[index] = group;
- if (!group->TestAndSetCounted(mDevice->GetPendingCommandSerial(), indexInSubmit)) {
- const BindGroupLayout* layout = ToBackend(group->GetLayout());
+ // Must be called before applying the bindgroups.
+ SetID3D12DescriptorHeaps(commandList);
- mCbvSrvUavDescriptorHeapSize += layout->GetCbvUavSrvDescriptorCount();
- mSamplerDescriptorHeapSize += layout->GetSamplerDescriptorCount();
- mBindGroupsToAllocate.push_back(group);
+ for (uint32_t index : IterateBitSet(mBindGroupLayoutsMask)) {
+ DAWN_TRY_ASSIGN(didCreateBindGroups,
+ ToBackend(mBindGroups[index])->Populate(mAllocator));
+ ASSERT(didCreateBindGroups);
}
}
- }
-
- void Apply(CommandRecordingContext* commandContext) {
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
for (uint32_t index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
- ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index,
- ToBackend(mBindGroups[index]), mDynamicOffsetCounts[index],
- mDynamicOffsets[index].data());
+ BindGroup* group = ToBackend(mBindGroups[index]);
+ ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index, group,
+ mDynamicOffsetCounts[index], mDynamicOffsets[index].data());
}
if (mInCompute) {
@@ -148,11 +153,13 @@ namespace dawn_native { namespace d3d12 {
switch (bindingType) {
case wgpu::BindingType::StorageBuffer:
ToBackend(mBuffers[index][binding])
- ->TransitionUsageNow(commandContext,
- wgpu::BufferUsage::Storage);
+ ->TrackUsageAndTransitionNow(commandContext,
+ wgpu::BufferUsage::Storage);
break;
case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
// Not implemented.
case wgpu::BindingType::UniformBuffer:
@@ -169,56 +176,47 @@ namespace dawn_native { namespace d3d12 {
}
}
DidApply();
- }
- void Reset() {
- for (uint32_t i = 0; i < kMaxBindGroups; ++i) {
- mBindGroups[i] = nullptr;
- }
+ return {};
}
- void SetID3D12DescriptorHeaps(ComPtr<ID3D12GraphicsCommandList> commandList) {
+ void SetID3D12DescriptorHeaps(ID3D12GraphicsCommandList* commandList) {
ASSERT(commandList != nullptr);
- ID3D12DescriptorHeap* descriptorHeaps[2] = {mCbvSrvUavGPUDescriptorHeap.Get(),
- mSamplerGPUDescriptorHeap.Get()};
- if (descriptorHeaps[0] && descriptorHeaps[1]) {
- commandList->SetDescriptorHeaps(2, descriptorHeaps);
- } else if (descriptorHeaps[0]) {
- commandList->SetDescriptorHeaps(1, descriptorHeaps);
- } else if (descriptorHeaps[1]) {
- commandList->SetDescriptorHeaps(1, &descriptorHeaps[1]);
- }
+ std::array<ID3D12DescriptorHeap*, 2> descriptorHeaps =
+ mAllocator->GetShaderVisibleHeaps();
+ ASSERT(descriptorHeaps[0] != nullptr);
+ ASSERT(descriptorHeaps[1] != nullptr);
+ commandList->SetDescriptorHeaps(2, descriptorHeaps.data());
}
private:
void ApplyBindGroup(ID3D12GraphicsCommandList* commandList,
- PipelineLayout* pipelineLayout,
+ const PipelineLayout* pipelineLayout,
uint32_t index,
BindGroup* group,
uint32_t dynamicOffsetCount,
- uint64_t* dynamicOffsets) {
+ const uint64_t* dynamicOffsets) {
+ ASSERT(dynamicOffsetCount == group->GetLayout()->GetDynamicBufferCount());
+
// Usually, the application won't set the same offsets many times,
// so always try to apply dynamic offsets even if the offsets stay the same
- if (dynamicOffsetCount) {
- // Update dynamic offsets
- const BindGroupLayout::LayoutBindingInfo& layout =
- group->GetLayout()->GetBindingInfo();
- uint32_t currentDynamicBufferIndex = 0;
-
- for (uint32_t bindingIndex : IterateBitSet(layout.hasDynamicOffset)) {
- ASSERT(dynamicOffsetCount > 0);
+ if (dynamicOffsetCount != 0) {
+ // Update dynamic offsets.
+ // Dynamic buffer bindings are packed at the beginning of the layout.
+ for (BindingIndex bindingIndex = 0; bindingIndex < dynamicOffsetCount;
+ ++bindingIndex) {
uint32_t parameterIndex =
pipelineLayout->GetDynamicRootParameterIndex(index, bindingIndex);
BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
// Calculate buffer locations that root descriptors links to. The location
// is (base buffer location + initial offset + dynamic offset)
- uint64_t dynamicOffset = dynamicOffsets[currentDynamicBufferIndex];
+ uint64_t dynamicOffset = dynamicOffsets[bindingIndex];
uint64_t offset = binding.offset + dynamicOffset;
D3D12_GPU_VIRTUAL_ADDRESS bufferLocation =
ToBackend(binding.buffer)->GetVA() + offset;
- switch (layout.types[bindingIndex]) {
+ switch (group->GetLayout()->GetBindingInfo(bindingIndex).type) {
case wgpu::BindingType::UniformBuffer:
if (mInCompute) {
commandList->SetComputeRootConstantBufferView(parameterIndex,
@@ -237,15 +235,23 @@ namespace dawn_native { namespace d3d12 {
bufferLocation);
}
break;
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ if (mInCompute) {
+ commandList->SetComputeRootShaderResourceView(parameterIndex,
+ bufferLocation);
+ } else {
+ commandList->SetGraphicsRootShaderResourceView(parameterIndex,
+ bufferLocation);
+ }
+ break;
case wgpu::BindingType::SampledTexture:
case wgpu::BindingType::Sampler:
case wgpu::BindingType::StorageTexture:
- case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
UNREACHABLE();
break;
}
-
- ++currentDynamicBufferIndex;
}
}
@@ -254,135 +260,87 @@ namespace dawn_native { namespace d3d12 {
return;
}
- uint32_t cbvUavSrvCount = ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount();
- uint32_t samplerCount = ToBackend(group->GetLayout())->GetSamplerDescriptorCount();
+ const uint32_t cbvUavSrvCount =
+ ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount();
+ const uint32_t samplerCount =
+ ToBackend(group->GetLayout())->GetSamplerDescriptorCount();
if (cbvUavSrvCount > 0) {
uint32_t parameterIndex = pipelineLayout->GetCbvUavSrvRootParameterIndex(index);
-
+ const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor =
+ group->GetBaseCbvUavSrvDescriptor();
if (mInCompute) {
- commandList->SetComputeRootDescriptorTable(
- parameterIndex,
- mCbvSrvUavGPUDescriptorHeap.GetGPUHandle(group->GetCbvUavSrvHeapOffset()));
+ commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
} else {
- commandList->SetGraphicsRootDescriptorTable(
- parameterIndex,
- mCbvSrvUavGPUDescriptorHeap.GetGPUHandle(group->GetCbvUavSrvHeapOffset()));
+ commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
}
}
if (samplerCount > 0) {
uint32_t parameterIndex = pipelineLayout->GetSamplerRootParameterIndex(index);
-
+ const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor =
+ group->GetBaseSamplerDescriptor();
if (mInCompute) {
- commandList->SetComputeRootDescriptorTable(
- parameterIndex,
- mSamplerGPUDescriptorHeap.GetGPUHandle(group->GetSamplerHeapOffset()));
+ commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
} else {
- commandList->SetGraphicsRootDescriptorTable(
- parameterIndex,
- mSamplerGPUDescriptorHeap.GetGPUHandle(group->GetSamplerHeapOffset()));
+ commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
}
}
}
- uint32_t mCbvSrvUavDescriptorHeapSize = 0;
- uint32_t mSamplerDescriptorHeapSize = 0;
- std::deque<BindGroup*> mBindGroupsToAllocate = {};
bool mInCompute = false;
- DescriptorHeapHandle mCbvSrvUavGPUDescriptorHeap = {};
- DescriptorHeapHandle mSamplerGPUDescriptorHeap = {};
-
- Device* mDevice;
+ ShaderVisibleDescriptorAllocator* mAllocator;
};
- class RenderPassDescriptorHeapTracker {
- public:
- RenderPassDescriptorHeapTracker(Device* device) : mDevice(device) {
- }
-
- // This function must only be called before calling AllocateRTVAndDSVHeaps().
- void TrackRenderPass(const BeginRenderPassCmd* renderPass) {
- DAWN_ASSERT(mRTVHeap.Get() == nullptr && mDSVHeap.Get() == nullptr);
-
- mNumRTVs += static_cast<uint32_t>(
- renderPass->attachmentState->GetColorAttachmentsMask().count());
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- ++mNumDSVs;
- }
- }
-
- MaybeError AllocateRTVAndDSVHeaps() {
- // This function should only be called once.
- DAWN_ASSERT(mRTVHeap.Get() == nullptr && mDSVHeap.Get() == nullptr);
- DescriptorHeapAllocator* allocator = mDevice->GetDescriptorHeapAllocator();
- if (mNumRTVs > 0) {
- DAWN_TRY_ASSIGN(
- mRTVHeap, allocator->AllocateCPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_RTV, mNumRTVs));
- }
- if (mNumDSVs > 0) {
- DAWN_TRY_ASSIGN(
- mDSVHeap, allocator->AllocateCPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_DSV, mNumDSVs));
- }
- return {};
- }
+ namespace {
// TODO(jiawei.shao@intel.com): use hash map <RenderPass, OMSetRenderTargetArgs> as
// cache to avoid redundant RTV and DSV memory allocations.
- OMSetRenderTargetArgs GetSubpassOMSetRenderTargetArgs(BeginRenderPassCmd* renderPass) {
+ ResultOrError<OMSetRenderTargetArgs> GetSubpassOMSetRenderTargetArgs(
+ BeginRenderPassCmd* renderPass,
+ Device* device) {
OMSetRenderTargetArgs args = {};
- unsigned int rtvIndex = 0;
uint32_t rtvCount = static_cast<uint32_t>(
renderPass->attachmentState->GetColorAttachmentsMask().count());
- DAWN_ASSERT(mAllocatedRTVs + rtvCount <= mNumRTVs);
+ DescriptorHeapAllocator* allocator = device->GetDescriptorHeapAllocator();
+ DescriptorHeapHandle rtvHeap;
+ DAWN_TRY_ASSIGN(rtvHeap,
+ allocator->AllocateCPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_RTV, rtvCount));
+ ASSERT(rtvHeap.Get() != nullptr);
+ ID3D12Device* d3dDevice = device->GetD3D12Device().Get();
+ unsigned int rtvIndex = 0;
for (uint32_t i :
IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ ASSERT(rtvIndex < rtvCount);
TextureView* view = ToBackend(renderPass->colorAttachments[i].view).Get();
- D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = mRTVHeap.GetCPUHandle(mAllocatedRTVs);
+ D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = rtvHeap.GetCPUHandle(rtvIndex);
D3D12_RENDER_TARGET_VIEW_DESC rtvDesc = view->GetRTVDescriptor();
- mDevice->GetD3D12Device()->CreateRenderTargetView(
- ToBackend(view->GetTexture())->GetD3D12Resource(), &rtvDesc, rtvHandle);
- args.RTVs[i] = rtvHandle;
+ d3dDevice->CreateRenderTargetView(ToBackend(view->GetTexture())->GetD3D12Resource(),
+ &rtvDesc, rtvHandle);
+ args.RTVs[rtvIndex] = rtvHandle;
++rtvIndex;
- ++mAllocatedRTVs;
}
- args.numRTVs = rtvIndex;
+ args.numRTVs = rtvCount;
if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- DAWN_ASSERT(mAllocatedDSVs < mNumDSVs);
+ DescriptorHeapHandle dsvHeap;
+ DAWN_TRY_ASSIGN(dsvHeap,
+ allocator->AllocateCPUHeap(D3D12_DESCRIPTOR_HEAP_TYPE_DSV, 1));
+ ASSERT(dsvHeap.Get() != nullptr);
TextureView* view = ToBackend(renderPass->depthStencilAttachment.view).Get();
- D3D12_CPU_DESCRIPTOR_HANDLE dsvHandle = mDSVHeap.GetCPUHandle(mAllocatedDSVs);
+ D3D12_CPU_DESCRIPTOR_HANDLE dsvHandle = dsvHeap.GetCPUHandle(0);
D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc = view->GetDSVDescriptor();
- mDevice->GetD3D12Device()->CreateDepthStencilView(
- ToBackend(view->GetTexture())->GetD3D12Resource(), &dsvDesc, dsvHandle);
+ d3dDevice->CreateDepthStencilView(ToBackend(view->GetTexture())->GetD3D12Resource(),
+ &dsvDesc, dsvHandle);
args.dsv = dsvHandle;
-
- ++mAllocatedDSVs;
}
return args;
}
- bool IsHeapAllocationCompleted() const {
- return mNumRTVs == mAllocatedRTVs && mNumDSVs == mAllocatedDSVs;
- }
-
- private:
- Device* mDevice;
- DescriptorHeapHandle mRTVHeap = {};
- DescriptorHeapHandle mDSVHeap = {};
- uint32_t mNumRTVs = 0;
- uint32_t mNumDSVs = 0;
-
- uint32_t mAllocatedRTVs = 0;
- uint32_t mAllocatedDSVs = 0;
- };
-
- namespace {
-
class VertexBufferTracker {
public:
void OnSetVertexBuffer(uint32_t slot, Buffer* buffer, uint64_t offset) {
@@ -475,61 +433,6 @@ namespace dawn_native { namespace d3d12 {
D3D12_INDEX_BUFFER_VIEW mD3D12BufferView = {};
};
- MaybeError AllocateAndSetDescriptorHeaps(Device* device,
- BindGroupStateTracker* bindingTracker,
- RenderPassDescriptorHeapTracker* renderPassTracker,
- CommandIterator* commands,
- uint32_t indexInSubmit) {
- {
- Command type;
-
- auto HandleCommand = [&](CommandIterator* commands, Command type) {
- switch (type) {
- case Command::SetBindGroup: {
- SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
- BindGroup* group = ToBackend(cmd->group.Get());
- if (cmd->dynamicOffsetCount) {
- commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
- }
- bindingTracker->TrackSetBindGroup(group, cmd->index, indexInSubmit);
- } break;
- case Command::BeginRenderPass: {
- BeginRenderPassCmd* cmd = commands->NextCommand<BeginRenderPassCmd>();
- renderPassTracker->TrackRenderPass(cmd);
- } break;
- default:
- SkipCommand(commands, type);
- }
- };
-
- while (commands->NextCommandId(&type)) {
- switch (type) {
- case Command::ExecuteBundles: {
- ExecuteBundlesCmd* cmd = commands->NextCommand<ExecuteBundlesCmd>();
- auto bundles = commands->NextData<Ref<RenderBundleBase>>(cmd->count);
-
- for (uint32_t i = 0; i < cmd->count; ++i) {
- CommandIterator* commands = bundles[i]->GetCommands();
- commands->Reset();
- while (commands->NextCommandId(&type)) {
- HandleCommand(commands, type);
- }
- }
- } break;
- default:
- HandleCommand(commands, type);
- break;
- }
- }
-
- commands->Reset();
- }
-
- DAWN_TRY(renderPassTracker->AllocateRTVAndDSVHeaps());
- DAWN_TRY(bindingTracker->AllocateDescriptorHeaps(device));
- return {};
- }
-
void ResolveMultisampledRenderPass(CommandRecordingContext* commandContext,
BeginRenderPassCmd* renderPass) {
ASSERT(renderPass != nullptr);
@@ -547,10 +450,10 @@ namespace dawn_native { namespace d3d12 {
Texture* resolveTexture = ToBackend(resolveTarget->GetTexture());
// Transition the usages of the color attachment and resolve target.
- colorTexture->TransitionUsageNow(commandContext,
- D3D12_RESOURCE_STATE_RESOLVE_SOURCE);
- resolveTexture->TransitionUsageNow(commandContext,
- D3D12_RESOURCE_STATE_RESOLVE_DEST);
+ colorTexture->TrackUsageAndTransitionNow(commandContext,
+ D3D12_RESOURCE_STATE_RESOLVE_SOURCE);
+ resolveTexture->TrackUsageAndTransitionNow(commandContext,
+ D3D12_RESOURCE_STATE_RESOLVE_DEST);
// Do MSAA resolve with ResolveSubResource().
ID3D12Resource* colorTextureHandle = colorTexture->GetD3D12Resource();
@@ -574,28 +477,19 @@ namespace dawn_native { namespace d3d12 {
FreeCommands(&mCommands);
}
- MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext,
- uint32_t indexInSubmit) {
+ MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext) {
Device* device = ToBackend(GetDevice());
BindGroupStateTracker bindingTracker(device);
- RenderPassDescriptorHeapTracker renderPassTracker(device);
ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
- // Precompute the allocation of bindgroups in descriptor heaps
- // TODO(cwallez@chromium.org): Iterating over all the commands here is inefficient. We
- // should have a system where commands and descriptors are recorded in parallel then the
- // heaps set using a small CommandList inserted just before the main CommandList.
- {
- DAWN_TRY(AllocateAndSetDescriptorHeaps(device, &bindingTracker, &renderPassTracker,
- &mCommands, indexInSubmit));
- bindingTracker.Reset();
- bindingTracker.SetID3D12DescriptorHeaps(commandList);
- }
+ // Make sure we use the correct descriptors for this command list. Could be done once per
+ // actual command list but here is ok because there should be few command buffers.
+ bindingTracker.SetID3D12DescriptorHeaps(commandList);
// Records the necessary barriers for the resource usage pre-computed by the frontend
- auto TransitionForPass = [](CommandRecordingContext* commandContext,
- const PassResourceUsage& usages) -> bool {
+ auto PrepareResourcesForSubmission = [](CommandRecordingContext* commandContext,
+ const PassResourceUsage& usages) -> bool {
std::vector<D3D12_RESOURCE_BARRIER> barriers;
ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
@@ -605,8 +499,8 @@ namespace dawn_native { namespace d3d12 {
for (size_t i = 0; i < usages.buffers.size(); ++i) {
D3D12_RESOURCE_BARRIER barrier;
if (ToBackend(usages.buffers[i])
- ->TransitionUsageAndGetResourceBarrier(commandContext, &barrier,
- usages.bufferUsages[i])) {
+ ->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
+ usages.bufferUsages[i])) {
barriers.push_back(barrier);
}
bufferUsages |= usages.bufferUsages[i];
@@ -629,8 +523,8 @@ namespace dawn_native { namespace d3d12 {
for (size_t i = 0; i < usages.textures.size(); ++i) {
D3D12_RESOURCE_BARRIER barrier;
if (ToBackend(usages.textures[i])
- ->TransitionUsageAndGetResourceBarrier(commandContext, &barrier,
- usages.textureUsages[i])) {
+ ->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
+ usages.textureUsages[i])) {
barriers.push_back(barrier);
}
textureUsages |= usages.textureUsages[i];
@@ -653,38 +547,46 @@ namespace dawn_native { namespace d3d12 {
case Command::BeginComputePass: {
mCommands.NextCommand<BeginComputePassCmd>();
- TransitionForPass(commandContext, passResourceUsages[nextPassNumber]);
+ PrepareResourcesForSubmission(commandContext,
+ passResourceUsages[nextPassNumber]);
bindingTracker.SetInComputePass(true);
- RecordComputePass(commandContext, &bindingTracker);
+ DAWN_TRY(RecordComputePass(commandContext, &bindingTracker));
nextPassNumber++;
- } break;
+ break;
+ }
case Command::BeginRenderPass: {
BeginRenderPassCmd* beginRenderPassCmd =
mCommands.NextCommand<BeginRenderPassCmd>();
- const bool passHasUAV =
- TransitionForPass(commandContext, passResourceUsages[nextPassNumber]);
+ const bool passHasUAV = PrepareResourcesForSubmission(
+ commandContext, passResourceUsages[nextPassNumber]);
bindingTracker.SetInComputePass(false);
- RecordRenderPass(commandContext, &bindingTracker, &renderPassTracker,
- beginRenderPassCmd, passHasUAV);
+
+ LazyClearRenderPassAttachments(beginRenderPassCmd);
+ DAWN_TRY(RecordRenderPass(commandContext, &bindingTracker, beginRenderPassCmd,
+ passHasUAV));
nextPassNumber++;
- } break;
+ break;
+ }
case Command::CopyBufferToBuffer: {
CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
Buffer* srcBuffer = ToBackend(copy->source.Get());
Buffer* dstBuffer = ToBackend(copy->destination.Get());
- srcBuffer->TransitionUsageNow(commandContext, wgpu::BufferUsage::CopySrc);
- dstBuffer->TransitionUsageNow(commandContext, wgpu::BufferUsage::CopyDst);
+ srcBuffer->TrackUsageAndTransitionNow(commandContext,
+ wgpu::BufferUsage::CopySrc);
+ dstBuffer->TrackUsageAndTransitionNow(commandContext,
+ wgpu::BufferUsage::CopyDst);
commandList->CopyBufferRegion(
dstBuffer->GetD3D12Resource().Get(), copy->destinationOffset,
srcBuffer->GetD3D12Resource().Get(), copy->sourceOffset, copy->size);
- } break;
+ break;
+ }
case Command::CopyBufferToTexture: {
CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
@@ -701,8 +603,9 @@ namespace dawn_native { namespace d3d12 {
copy->destination.arrayLayer, 1);
}
- buffer->TransitionUsageNow(commandContext, wgpu::BufferUsage::CopySrc);
- texture->TransitionUsageNow(commandContext, wgpu::TextureUsage::CopyDst);
+ buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
+ texture->TrackUsageAndTransitionNow(commandContext,
+ wgpu::TextureUsage::CopyDst);
auto copySplit = ComputeTextureCopySplit(
copy->destination.origin, copy->copySize, texture->GetFormat(),
@@ -726,7 +629,8 @@ namespace dawn_native { namespace d3d12 {
info.textureOffset.y, info.textureOffset.z,
&bufferLocation, &sourceRegion);
}
- } break;
+ break;
+ }
case Command::CopyTextureToBuffer: {
CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
@@ -736,8 +640,9 @@ namespace dawn_native { namespace d3d12 {
texture->EnsureSubresourceContentInitialized(
commandContext, copy->source.mipLevel, 1, copy->source.arrayLayer, 1);
- texture->TransitionUsageNow(commandContext, wgpu::TextureUsage::CopySrc);
- buffer->TransitionUsageNow(commandContext, wgpu::BufferUsage::CopyDst);
+ texture->TrackUsageAndTransitionNow(commandContext,
+ wgpu::TextureUsage::CopySrc);
+ buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
TextureCopySplit copySplit = ComputeTextureCopySplit(
copy->source.origin, copy->copySize, texture->GetFormat(),
@@ -763,7 +668,8 @@ namespace dawn_native { namespace d3d12 {
info.bufferOffset.y, info.bufferOffset.z,
&textureLocation, &sourceRegion);
}
- } break;
+ break;
+ }
case Command::CopyTextureToTexture: {
CopyTextureToTextureCmd* copy =
@@ -783,11 +689,11 @@ namespace dawn_native { namespace d3d12 {
commandContext, copy->destination.mipLevel, 1,
copy->destination.arrayLayer, 1);
}
- source->TransitionUsageNow(commandContext, wgpu::TextureUsage::CopySrc);
- destination->TransitionUsageNow(commandContext, wgpu::TextureUsage::CopyDst);
+ source->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc);
+ destination->TrackUsageAndTransitionNow(commandContext,
+ wgpu::TextureUsage::CopyDst);
- if (CanUseCopyResource(source->GetNumMipLevels(), source->GetSize(),
- destination->GetSize(), copy->copySize)) {
+ if (CanUseCopyResource(source, destination, copy->copySize)) {
commandList->CopyResource(destination->GetD3D12Resource(),
source->GetD3D12Resource());
} else {
@@ -807,18 +713,21 @@ namespace dawn_native { namespace d3d12 {
&dstLocation, copy->destination.origin.x, copy->destination.origin.y,
copy->destination.origin.z, &srcLocation, &sourceRegion);
}
- } break;
+ break;
+ }
- default: { UNREACHABLE(); } break;
+ default: {
+ UNREACHABLE();
+ break;
+ }
}
}
- DAWN_ASSERT(renderPassTracker.IsHeapAllocationCompleted());
return {};
}
- void CommandBuffer::RecordComputePass(CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker) {
+ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandContext,
+ BindGroupStateTracker* bindingTracker) {
PipelineLayout* lastLayout = nullptr;
ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
@@ -828,26 +737,28 @@ namespace dawn_native { namespace d3d12 {
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
- bindingTracker->Apply(commandContext);
+ DAWN_TRY(bindingTracker->Apply(commandContext));
commandList->Dispatch(dispatch->x, dispatch->y, dispatch->z);
- } break;
+ break;
+ }
case Command::DispatchIndirect: {
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
- bindingTracker->Apply(commandContext);
+ DAWN_TRY(bindingTracker->Apply(commandContext));
Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
ComPtr<ID3D12CommandSignature> signature =
ToBackend(GetDevice())->GetDispatchIndirectSignature();
commandList->ExecuteIndirect(signature.Get(), 1,
buffer->GetD3D12Resource().Get(),
dispatch->indirectOffset, nullptr, 0);
- } break;
+ break;
+ }
case Command::EndComputePass: {
mCommands.NextCommand<EndComputePassCmd>();
- return;
- } break;
+ return {};
+ }
case Command::SetComputePipeline: {
SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
@@ -860,7 +771,8 @@ namespace dawn_native { namespace d3d12 {
bindingTracker->OnSetPipeline(pipeline);
lastLayout = layout;
- } break;
+ break;
+ }
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
@@ -873,7 +785,8 @@ namespace dawn_native { namespace d3d12 {
bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
dynamicOffsets);
- } break;
+ break;
+ }
case Command::InsertDebugMarker: {
InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
@@ -886,7 +799,8 @@ namespace dawn_native { namespace d3d12 {
->GetFunctions()
->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
}
- } break;
+ break;
+ }
case Command::PopDebugGroup: {
mCommands.NextCommand<PopDebugGroupCmd>();
@@ -896,7 +810,8 @@ namespace dawn_native { namespace d3d12 {
->GetFunctions()
->pixEndEventOnCommandList(commandList);
}
- } break;
+ break;
+ }
case Command::PushDebugGroup: {
PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
@@ -909,11 +824,17 @@ namespace dawn_native { namespace d3d12 {
->GetFunctions()
->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
}
- } break;
+ break;
+ }
- default: { UNREACHABLE(); } break;
+ default: {
+ UNREACHABLE();
+ break;
+ }
}
}
+
+ return {};
}
void CommandBuffer::SetupRenderPass(CommandRecordingContext* commandContext,
@@ -922,15 +843,6 @@ namespace dawn_native { namespace d3d12 {
for (uint32_t i : IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
RenderPassColorAttachmentInfo& attachmentInfo = renderPass->colorAttachments[i];
TextureView* view = ToBackend(attachmentInfo.view.Get());
- Texture* texture = ToBackend(view->GetTexture());
-
- // Load operation is changed to clear when the texture is uninitialized.
- if (!texture->IsSubresourceContentInitialized(view->GetBaseMipLevel(), 1,
- view->GetBaseArrayLayer(), 1) &&
- attachmentInfo.loadOp == wgpu::LoadOp::Load) {
- attachmentInfo.loadOp = wgpu::LoadOp::Clear;
- attachmentInfo.clearColor = {0.0f, 0.0f, 0.0f, 0.0f};
- }
// Set color load operation.
renderPassBuilder->SetRenderTargetBeginningAccess(
@@ -942,49 +854,24 @@ namespace dawn_native { namespace d3d12 {
Texture* resolveDestinationTexture =
ToBackend(resolveDestinationView->GetTexture());
- resolveDestinationTexture->TransitionUsageNow(commandContext,
- D3D12_RESOURCE_STATE_RESOLVE_DEST);
-
- // Mark resolve target as initialized to prevent clearing later.
- resolveDestinationTexture->SetIsSubresourceContentInitialized(
- true, resolveDestinationView->GetBaseMipLevel(), 1,
- resolveDestinationView->GetBaseArrayLayer(), 1);
+ resolveDestinationTexture->TrackUsageAndTransitionNow(
+ commandContext, D3D12_RESOURCE_STATE_RESOLVE_DEST);
renderPassBuilder->SetRenderTargetEndingAccessResolve(i, attachmentInfo.storeOp,
view, resolveDestinationView);
} else {
renderPassBuilder->SetRenderTargetEndingAccess(i, attachmentInfo.storeOp);
}
-
- // Set whether or not the texture requires initialization after the pass.
- bool isInitialized = attachmentInfo.storeOp == wgpu::StoreOp::Store;
- texture->SetIsSubresourceContentInitialized(isInitialized, view->GetBaseMipLevel(), 1,
- view->GetBaseArrayLayer(), 1);
}
if (renderPass->attachmentState->HasDepthStencilAttachment()) {
RenderPassDepthStencilAttachmentInfo& attachmentInfo =
renderPass->depthStencilAttachment;
TextureView* view = ToBackend(renderPass->depthStencilAttachment.view.Get());
- Texture* texture = ToBackend(view->GetTexture());
const bool hasDepth = view->GetTexture()->GetFormat().HasDepth();
const bool hasStencil = view->GetTexture()->GetFormat().HasStencil();
- // Load operations are changed to clear when the texture is uninitialized.
- if (!view->GetTexture()->IsSubresourceContentInitialized(
- view->GetBaseMipLevel(), view->GetLevelCount(), view->GetBaseArrayLayer(),
- view->GetLayerCount())) {
- if (hasDepth && attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
- attachmentInfo.clearDepth = 0.0f;
- attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
- }
- if (hasStencil && attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
- attachmentInfo.clearStencil = 0u;
- attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
- }
- }
-
// Set depth/stencil load operations.
if (hasDepth) {
renderPassBuilder->SetDepthAccess(
@@ -1002,12 +889,6 @@ namespace dawn_native { namespace d3d12 {
renderPassBuilder->SetStencilNoAccess();
}
- // Set whether or not the texture requires initialization.
- ASSERT(!hasDepth || !hasStencil ||
- attachmentInfo.depthStoreOp == attachmentInfo.stencilStoreOp);
- bool isInitialized = attachmentInfo.depthStoreOp == wgpu::StoreOp::Store;
- texture->SetIsSubresourceContentInitialized(isInitialized, view->GetBaseMipLevel(), 1,
- view->GetBaseArrayLayer(), 1);
} else {
renderPassBuilder->SetDepthStencilNoAccess();
}
@@ -1070,16 +951,15 @@ namespace dawn_native { namespace d3d12 {
: nullptr);
}
- void CommandBuffer::RecordRenderPass(
- CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker,
- RenderPassDescriptorHeapTracker* renderPassDescriptorHeapTracker,
- BeginRenderPassCmd* renderPass,
- const bool passHasUAV) {
- OMSetRenderTargetArgs args =
- renderPassDescriptorHeapTracker->GetSubpassOMSetRenderTargetArgs(renderPass);
+ MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* commandContext,
+ BindGroupStateTracker* bindingTracker,
+ BeginRenderPassCmd* renderPass,
+ const bool passHasUAV) {
+ Device* device = ToBackend(GetDevice());
+ OMSetRenderTargetArgs args;
+ DAWN_TRY_ASSIGN(args, GetSubpassOMSetRenderTargetArgs(renderPass, device));
- const bool useRenderPass = GetDevice()->IsToggleEnabled(Toggle::UseD3D12RenderPass);
+ const bool useRenderPass = device->IsToggleEnabled(Toggle::UseD3D12RenderPass);
// renderPassBuilder must be scoped to RecordRenderPass because any underlying
// D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS structs must remain
@@ -1123,32 +1003,34 @@ namespace dawn_native { namespace d3d12 {
VertexBufferTracker vertexBufferTracker = {};
IndexBufferTracker indexBufferTracker = {};
- auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
+ auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) -> MaybeError {
switch (type) {
case Command::Draw: {
DrawCmd* draw = iter->NextCommand<DrawCmd>();
- bindingTracker->Apply(commandContext);
+ DAWN_TRY(bindingTracker->Apply(commandContext));
vertexBufferTracker.Apply(commandList, lastPipeline);
commandList->DrawInstanced(draw->vertexCount, draw->instanceCount,
draw->firstVertex, draw->firstInstance);
- } break;
+ break;
+ }
case Command::DrawIndexed: {
DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
- bindingTracker->Apply(commandContext);
+ DAWN_TRY(bindingTracker->Apply(commandContext));
indexBufferTracker.Apply(commandList);
vertexBufferTracker.Apply(commandList, lastPipeline);
commandList->DrawIndexedInstanced(draw->indexCount, draw->instanceCount,
draw->firstIndex, draw->baseVertex,
draw->firstInstance);
- } break;
+ break;
+ }
case Command::DrawIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- bindingTracker->Apply(commandContext);
+ DAWN_TRY(bindingTracker->Apply(commandContext));
vertexBufferTracker.Apply(commandList, lastPipeline);
Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
ComPtr<ID3D12CommandSignature> signature =
@@ -1156,12 +1038,13 @@ namespace dawn_native { namespace d3d12 {
commandList->ExecuteIndirect(signature.Get(), 1,
buffer->GetD3D12Resource().Get(),
draw->indirectOffset, nullptr, 0);
- } break;
+ break;
+ }
case Command::DrawIndexedIndirect: {
DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
- bindingTracker->Apply(commandContext);
+ DAWN_TRY(bindingTracker->Apply(commandContext));
indexBufferTracker.Apply(commandList);
vertexBufferTracker.Apply(commandList, lastPipeline);
Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
@@ -1170,7 +1053,8 @@ namespace dawn_native { namespace d3d12 {
commandList->ExecuteIndirect(signature.Get(), 1,
buffer->GetD3D12Resource().Get(),
draw->indirectOffset, nullptr, 0);
- } break;
+ break;
+ }
case Command::InsertDebugMarker: {
InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
@@ -1183,7 +1067,8 @@ namespace dawn_native { namespace d3d12 {
->GetFunctions()
->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
}
- } break;
+ break;
+ }
case Command::PopDebugGroup: {
iter->NextCommand<PopDebugGroupCmd>();
@@ -1193,7 +1078,8 @@ namespace dawn_native { namespace d3d12 {
->GetFunctions()
->pixEndEventOnCommandList(commandList);
}
- } break;
+ break;
+ }
case Command::PushDebugGroup: {
PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
@@ -1206,7 +1092,8 @@ namespace dawn_native { namespace d3d12 {
->GetFunctions()
->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
}
- } break;
+ break;
+ }
case Command::SetRenderPipeline: {
SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
@@ -1222,7 +1109,8 @@ namespace dawn_native { namespace d3d12 {
lastPipeline = pipeline;
lastLayout = layout;
- } break;
+ break;
+ }
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
@@ -1235,25 +1123,29 @@ namespace dawn_native { namespace d3d12 {
bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
dynamicOffsets);
- } break;
+ break;
+ }
case Command::SetIndexBuffer: {
SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
indexBufferTracker.OnSetIndexBuffer(ToBackend(cmd->buffer.Get()), cmd->offset);
- } break;
+ break;
+ }
case Command::SetVertexBuffer: {
SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
vertexBufferTracker.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
cmd->offset);
- } break;
+ break;
+ }
default:
UNREACHABLE();
break;
}
+ return {};
};
Command type;
@@ -1266,14 +1158,15 @@ namespace dawn_native { namespace d3d12 {
} else if (renderPass->attachmentState->GetSampleCount() > 1) {
ResolveMultisampledRenderPass(commandContext, renderPass);
}
- return;
- } break;
+ return {};
+ }
case Command::SetStencilReference: {
SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
commandList->OMSetStencilRef(cmd->reference);
- } break;
+ break;
+ }
case Command::SetViewport: {
SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
@@ -1286,7 +1179,8 @@ namespace dawn_native { namespace d3d12 {
viewport.MaxDepth = cmd->maxDepth;
commandList->RSSetViewports(1, &viewport);
- } break;
+ break;
+ }
case Command::SetScissorRect: {
SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
@@ -1297,12 +1191,14 @@ namespace dawn_native { namespace d3d12 {
rect.bottom = cmd->y + cmd->height;
commandList->RSSetScissorRects(1, &rect);
- } break;
+ break;
+ }
case Command::SetBlendColor: {
SetBlendColorCmd* cmd = mCommands.NextCommand<SetBlendColorCmd>();
commandList->OMSetBlendFactor(static_cast<const FLOAT*>(&cmd->color.r));
- } break;
+ break;
+ }
case Command::ExecuteBundles: {
ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
@@ -1312,14 +1208,18 @@ namespace dawn_native { namespace d3d12 {
CommandIterator* iter = bundles[i]->GetCommands();
iter->Reset();
while (iter->NextCommandId(&type)) {
- EncodeRenderBundleCommand(iter, type);
+ DAWN_TRY(EncodeRenderBundleCommand(iter, type));
}
}
- } break;
+ break;
+ }
- default: { EncodeRenderBundleCommand(&mCommands, type); } break;
+ default: {
+ DAWN_TRY(EncodeRenderBundleCommand(&mCommands, type));
+ break;
+ }
}
}
+ return {};
}
-
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
index d710d08da99..6a021f1f8f9 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
@@ -49,16 +49,15 @@ namespace dawn_native { namespace d3d12 {
CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
~CommandBuffer();
- MaybeError RecordCommands(CommandRecordingContext* commandContext, uint32_t indexInSubmit);
+ MaybeError RecordCommands(CommandRecordingContext* commandContext);
private:
- void RecordComputePass(CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker);
- void RecordRenderPass(CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker,
- RenderPassDescriptorHeapTracker* renderPassDescriptorHeapTracker,
- BeginRenderPassCmd* renderPass,
- bool passHasUAV);
+ MaybeError RecordComputePass(CommandRecordingContext* commandContext,
+ BindGroupStateTracker* bindingTracker);
+ MaybeError RecordRenderPass(CommandRecordingContext* commandContext,
+ BindGroupStateTracker* bindingTracker,
+ BeginRenderPassCmd* renderPass,
+ bool passHasUAV);
void SetupRenderPass(CommandRecordingContext* commandContext,
BeginRenderPassCmd* renderPass,
RenderPassBuilder* renderPassBuilder);
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
index 209009c101b..81dad3c138e 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
@@ -14,6 +14,9 @@
#include "dawn_native/d3d12/CommandRecordingContext.h"
#include "dawn_native/d3d12/CommandAllocatorManager.h"
#include "dawn_native/d3d12/D3D12Error.h"
+#include "dawn_native/d3d12/DeviceD3D12.h"
+#include "dawn_native/d3d12/HeapD3D12.h"
+#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -51,14 +54,14 @@ namespace dawn_native { namespace d3d12 {
return {};
}
- MaybeError CommandRecordingContext::ExecuteCommandList(ID3D12CommandQueue* d3d12CommandQueue) {
+ MaybeError CommandRecordingContext::ExecuteCommandList(Device* device) {
if (IsOpen()) {
// Shared textures must be transitioned to common state after the last usage in order
// for them to be used by other APIs like D3D11. We ensure this by transitioning to the
// common state right before command list submission. TransitionUsageNow itself ensures
// no unnecessary transitions happen if the resources is already in the common state.
for (Texture* texture : mSharedTextures) {
- texture->TransitionUsageNow(this, D3D12_RESOURCE_STATE_COMMON);
+ texture->TrackUsageAndTransitionNow(this, D3D12_RESOURCE_STATE_COMMON);
}
MaybeError error =
@@ -67,16 +70,28 @@ namespace dawn_native { namespace d3d12 {
Release();
DAWN_TRY(std::move(error));
}
+ DAWN_TRY(device->GetResidencyManager()->EnsureHeapsAreResident(
+ mHeapsPendingUsage.data(), mHeapsPendingUsage.size()));
ID3D12CommandList* d3d12CommandList = GetCommandList();
- d3d12CommandQueue->ExecuteCommandLists(1, &d3d12CommandList);
+ device->GetCommandQueue()->ExecuteCommandLists(1, &d3d12CommandList);
mIsOpen = false;
mSharedTextures.clear();
+ mHeapsPendingUsage.clear();
}
return {};
}
+ void CommandRecordingContext::TrackHeapUsage(Heap* heap, Serial serial) {
+ // Before tracking the heap, check the last serial it was recorded on to ensure we aren't
+ // tracking it more than once.
+ if (heap->GetLastUsage() < serial) {
+ heap->SetLastUsage(serial);
+ mHeapsPendingUsage.push_back(heap);
+ }
+ }
+
ID3D12GraphicsCommandList* CommandRecordingContext::GetCommandList() const {
ASSERT(mD3d12CommandList != nullptr);
ASSERT(IsOpen());
@@ -96,6 +111,7 @@ namespace dawn_native { namespace d3d12 {
mD3d12CommandList4.Reset();
mIsOpen = false;
mSharedTextures.clear();
+ mHeapsPendingUsage.clear();
}
bool CommandRecordingContext::IsOpen() const {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h
index d501d59692d..932fa4d7bfb 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h
@@ -35,13 +35,16 @@ namespace dawn_native { namespace d3d12 {
void Release();
bool IsOpen() const;
- MaybeError ExecuteCommandList(ID3D12CommandQueue* d3d12CommandQueue);
+ MaybeError ExecuteCommandList(Device* device);
+
+ void TrackHeapUsage(Heap* heap, Serial serial);
private:
ComPtr<ID3D12GraphicsCommandList> mD3d12CommandList;
ComPtr<ID3D12GraphicsCommandList4> mD3d12CommandList4;
bool mIsOpen = false;
std::set<Texture*> mSharedTextures;
+ std::vector<Heap*> mHeapsPendingUsage;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
index f1b94914a40..1fc81d635f0 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
@@ -22,8 +22,17 @@
namespace dawn_native { namespace d3d12 {
- ComputePipeline::ComputePipeline(Device* device, const ComputePipelineDescriptor* descriptor)
- : ComputePipelineBase(device, descriptor) {
+ ResultOrError<ComputePipeline*> ComputePipeline::Create(
+ Device* device,
+ const ComputePipelineDescriptor* descriptor) {
+ std::unique_ptr<ComputePipeline> pipeline =
+ std::make_unique<ComputePipeline>(device, descriptor);
+ DAWN_TRY(pipeline->Initialize(descriptor));
+ return pipeline.release();
+ }
+
+ MaybeError ComputePipeline::Initialize(const ComputePipelineDescriptor* descriptor) {
+ Device* device = ToBackend(GetDevice());
uint32_t compileFlags = 0;
#if defined(_DEBUG)
// Enable better shader debugging with the graphics debugging tools.
@@ -33,7 +42,8 @@ namespace dawn_native { namespace d3d12 {
compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
ShaderModule* module = ToBackend(descriptor->computeStage.module);
- const std::string hlslSource = module->GetHLSLSource(ToBackend(GetLayout()));
+ std::string hlslSource;
+ DAWN_TRY_ASSIGN(hlslSource, module->GetHLSLSource(ToBackend(GetLayout())));
ComPtr<ID3DBlob> compiledShader;
ComPtr<ID3DBlob> errors;
@@ -53,6 +63,7 @@ namespace dawn_native { namespace d3d12 {
device->GetD3D12Device()->CreateComputePipelineState(&d3dDesc,
IID_PPV_ARGS(&mPipelineState));
+ return {};
}
ComputePipeline::~ComputePipeline() {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h
index 7b1af9f6705..9f38bbe8a40 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.h
@@ -25,12 +25,16 @@ namespace dawn_native { namespace d3d12 {
class ComputePipeline : public ComputePipelineBase {
public:
- ComputePipeline(Device* device, const ComputePipelineDescriptor* descriptor);
+ static ResultOrError<ComputePipeline*> Create(Device* device,
+ const ComputePipelineDescriptor* descriptor);
+ ComputePipeline() = delete;
~ComputePipeline();
ComPtr<ID3D12PipelineState> GetPipelineState();
private:
+ using ComputePipelineBase::ComputePipelineBase;
+ MaybeError Initialize(const ComputePipelineDescriptor* descriptor);
ComPtr<ID3D12PipelineState> mPipelineState;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
index 2db62da4610..fbf4344d50b 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
@@ -20,6 +20,7 @@
#include "common/SwapChainUtils.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/NativeSwapChainImplD3D12.h"
+#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
#include "dawn_native/d3d12/TextureD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -46,15 +47,24 @@ namespace dawn_native { namespace d3d12 {
return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
}
+ ExternalImageDescriptorDXGISharedHandle::ExternalImageDescriptorDXGISharedHandle()
+ : ExternalImageDescriptor(ExternalImageDescriptorType::DXGISharedHandle) {
+ }
+
+ uint64_t SetExternalMemoryReservation(WGPUDevice device, uint64_t requestedReservationSize) {
+ Device* backendDevice = reinterpret_cast<Device*>(device);
+
+ return backendDevice->GetResidencyManager()->SetExternalMemoryReservation(
+ requestedReservationSize);
+ }
+
WGPUTexture WrapSharedHandle(WGPUDevice device,
- const WGPUTextureDescriptor* descriptor,
- HANDLE sharedHandle,
- uint64_t acquireMutexKey) {
+ const ExternalImageDescriptorDXGISharedHandle* descriptor) {
Device* backendDevice = reinterpret_cast<Device*>(device);
- const TextureDescriptor* backendDescriptor =
- reinterpret_cast<const TextureDescriptor*>(descriptor);
- TextureBase* texture =
- backendDevice->WrapSharedHandle(backendDescriptor, sharedHandle, acquireMutexKey);
+ TextureBase* texture = backendDevice->WrapSharedHandle(descriptor, descriptor->sharedHandle,
+ descriptor->acquireMutexKey,
+ descriptor->isSwapChainTexture);
return reinterpret_cast<WGPUTexture>(texture);
}
+
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp
index 2cd46273c56..d91d479c735 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp
@@ -23,7 +23,12 @@ namespace dawn_native { namespace d3d12 {
}
std::string message = std::string(context) + " failed with " + std::to_string(result);
- return DAWN_DEVICE_LOST_ERROR(message);
+
+ if (result == DXGI_ERROR_DEVICE_REMOVED) {
+ return DAWN_DEVICE_LOST_ERROR(message);
+ } else {
+ return DAWN_INTERNAL_ERROR(message);
+ }
}
MaybeError CheckOutOfMemoryHRESULT(HRESULT result, const char* context) {
@@ -33,4 +38,4 @@ namespace dawn_native { namespace d3d12 {
return CheckHRESULT(result, context);
}
-}} // namespace dawn_native::d3d12 \ No newline at end of file
+}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp
index de9bd0369d7..6505e44aef5 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/d3d12/D3D12Info.h"
+#include "common/GPUInfo.h"
#include "dawn_native/d3d12/AdapterD3D12.h"
#include "dawn_native/d3d12/BackendD3D12.h"
#include "dawn_native/d3d12/D3D12Error.h"
@@ -45,12 +46,17 @@ namespace dawn_native { namespace d3d12 {
// Windows builds 1809 and above can use the D3D12 render pass API. If we query
// CheckFeatureSupport for D3D12_FEATURE_D3D12_OPTIONS5 successfully, then we can use
// the render pass API.
+ info.supportsRenderPass = false;
D3D12_FEATURE_DATA_D3D12_OPTIONS5 featureOptions5 = {};
if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
D3D12_FEATURE_D3D12_OPTIONS5, &featureOptions5, sizeof(featureOptions5)))) {
- info.supportsRenderPass = true;
- } else {
- info.supportsRenderPass = false;
+ // Performance regressions have been observed when using a render pass on Intel graphics with
+ // RENDER_PASS_TIER_1 available, so fall back to a software emulated render pass on
+ // these platforms.
+ if (featureOptions5.RenderPassesTier < D3D12_RENDER_PASS_TIER_1 ||
+ !gpu_info::IsIntel(adapter.GetPCIInfo().vendorId)) {
+ info.supportsRenderPass = true;
+ }
}
return info;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocationD3D12.cpp
new file mode 100644
index 00000000000..fd16f1ef58f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocationD3D12.cpp
@@ -0,0 +1,49 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/d3d12/DescriptorHeapAllocationD3D12.h"
+#include "dawn_native/Error.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ DescriptorHeapAllocation::DescriptorHeapAllocation() : mSizeIncrement(0) {
+ }
+
+ DescriptorHeapAllocation::DescriptorHeapAllocation(
+ uint32_t sizeIncrement,
+ D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptorHandle,
+ D3D12_GPU_DESCRIPTOR_HANDLE baseGPUDescriptorHandle)
+ : mSizeIncrement(sizeIncrement),
+ mBaseCPUDescriptorHandle(baseCPUDescriptorHandle),
+ mBaseGPUDescriptorHandle(baseGPUDescriptorHandle) {
+ }
+
+ D3D12_CPU_DESCRIPTOR_HANDLE DescriptorHeapAllocation::GetCPUHandle(uint32_t offset) const {
+ ASSERT(!IsInvalid());
+ D3D12_CPU_DESCRIPTOR_HANDLE cpuHandle = mBaseCPUDescriptorHandle;
+ cpuHandle.ptr += mSizeIncrement * offset;
+ return cpuHandle;
+ }
+
+ D3D12_GPU_DESCRIPTOR_HANDLE DescriptorHeapAllocation::GetGPUHandle(uint32_t offset) const {
+ ASSERT(!IsInvalid());
+ D3D12_GPU_DESCRIPTOR_HANDLE gpuHandle = mBaseGPUDescriptorHandle;
+ gpuHandle.ptr += mSizeIncrement * offset;
+ return gpuHandle;
+ }
+
+ bool DescriptorHeapAllocation::IsInvalid() const {
+ return mBaseCPUDescriptorHandle.ptr == 0;
+ }
+}} // namespace dawn_native::d3d12 \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocationD3D12.h
new file mode 100644
index 00000000000..30a034c9cc3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocationD3D12.h
@@ -0,0 +1,46 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_DESCRIPTORHEAPALLOCATIOND3D12_H_
+#define DAWNNATIVE_D3D12_DESCRIPTORHEAPALLOCATIOND3D12_H_
+
+#include "dawn_native/d3d12/d3d12_platform.h"
+
+#include <cstdint>
+
+namespace dawn_native { namespace d3d12 {
+
+ // Wrapper for a handle into a descriptor heap.
+ class DescriptorHeapAllocation {
+ public:
+ DescriptorHeapAllocation();
+ DescriptorHeapAllocation(uint32_t sizeIncrement,
+ D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptorHandle,
+ D3D12_GPU_DESCRIPTOR_HANDLE baseGPUDescriptorHandle);
+ ~DescriptorHeapAllocation() = default;
+
+ D3D12_CPU_DESCRIPTOR_HANDLE GetCPUHandle(uint32_t offset) const;
+ D3D12_GPU_DESCRIPTOR_HANDLE GetGPUHandle(uint32_t offset) const;
+
+ bool IsInvalid() const;
+
+ private:
+ uint32_t mSizeIncrement;
+
+ D3D12_CPU_DESCRIPTOR_HANDLE mBaseCPUDescriptorHandle = {0};
+ D3D12_GPU_DESCRIPTOR_HANDLE mBaseGPUDescriptorHandle = {0};
+ };
+}} // namespace dawn_native::d3d12
+
+#endif // DAWNNATIVE_D3D12_DESCRIPTORHEAPALLOCATIOND3D12_H_ \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
index 0e1ea4790b2..f0e8b469194 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
@@ -17,6 +17,7 @@
#include "common/Assert.h"
#include "dawn_native/BackendConnection.h"
#include "dawn_native/DynamicUploader.h"
+#include "dawn_native/ErrorData.h"
#include "dawn_native/d3d12/AdapterD3D12.h"
#include "dawn_native/d3d12/BackendD3D12.h"
#include "dawn_native/d3d12/BindGroupD3D12.h"
@@ -31,9 +32,11 @@
#include "dawn_native/d3d12/PlatformFunctions.h"
#include "dawn_native/d3d12/QueueD3D12.h"
#include "dawn_native/d3d12/RenderPipelineD3D12.h"
+#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
#include "dawn_native/d3d12/ResourceAllocatorManagerD3D12.h"
#include "dawn_native/d3d12/SamplerD3D12.h"
#include "dawn_native/d3d12/ShaderModuleD3D12.h"
+#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
#include "dawn_native/d3d12/StagingBufferD3D12.h"
#include "dawn_native/d3d12/SwapChainD3D12.h"
#include "dawn_native/d3d12/TextureD3D12.h"
@@ -61,6 +64,10 @@ namespace dawn_native { namespace d3d12 {
CheckHRESULT(mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&mCommandQueue)),
"D3D12 create command queue"));
+ // If PIX is not attached, the QueryInterface fails. Hence, no need to check the return
+ // value.
+ mCommandQueue.As(&mD3d12SharingContract);
+
DAWN_TRY(CheckHRESULT(mD3d12Device->CreateFence(mLastSubmittedSerial, D3D12_FENCE_FLAG_NONE,
IID_PPV_ARGS(&mFence)),
"D3D12 create fence"));
@@ -71,7 +78,13 @@ namespace dawn_native { namespace d3d12 {
// Initialize backend services
mCommandAllocatorManager = std::make_unique<CommandAllocatorManager>(this);
mDescriptorHeapAllocator = std::make_unique<DescriptorHeapAllocator>(this);
+
+ mShaderVisibleDescriptorAllocator =
+ std::make_unique<ShaderVisibleDescriptorAllocator>(this);
+ DAWN_TRY(mShaderVisibleDescriptorAllocator->Initialize());
+
mMapRequestTracker = std::make_unique<MapRequestTracker>(this);
+ mResidencyManager = std::make_unique<ResidencyManager>(this);
mResourceAllocatorManager = std::make_unique<ResourceAllocatorManager>(this);
DAWN_TRY(NextSerial());
@@ -104,34 +117,7 @@ namespace dawn_native { namespace d3d12 {
}
Device::~Device() {
- // Immediately forget about all pending commands
- mPendingCommands.Release();
-
- ConsumedError(NextSerial());
- // Wait for all in-flight commands to finish executing
- ConsumedError(WaitForSerial(mLastSubmittedSerial));
-
- // Call tick one last time so resources are cleaned up. Ignore the return value so we can
- // continue shutting down in an orderly fashion.
- ConsumedError(TickImpl());
-
- // Free services explicitly so that they can free D3D12 resources before destruction of the
- // device.
- mDynamicUploader = nullptr;
-
- // GPU is no longer executing commands. Existing objects do not get freed until the device
- // is destroyed. To ensure objects are always released, force the completed serial to be
- // MAX.
- mCompletedSerial = std::numeric_limits<Serial>::max();
-
- if (mFenceEvent != nullptr) {
- ::CloseHandle(mFenceEvent);
- }
-
- mUsedComObjectRefs.ClearUpTo(mCompletedSerial);
-
- ASSERT(mUsedComObjectRefs.Empty());
- ASSERT(!mPendingCommands.IsOpen());
+ BaseDestructor();
}
ComPtr<ID3D12Device> Device::GetD3D12Device() const {
@@ -142,6 +128,10 @@ namespace dawn_native { namespace d3d12 {
return mCommandQueue;
}
+ ID3D12SharingContract* Device::GetSharingContract() const {
+ return mD3d12SharingContract.Get();
+ }
+
ComPtr<ID3D12CommandSignature> Device::GetDispatchIndirectSignature() const {
return mDispatchIndirectSignature;
}
@@ -174,6 +164,10 @@ namespace dawn_native { namespace d3d12 {
return mCommandAllocatorManager.get();
}
+ ResidencyManager* Device::GetResidencyManager() const {
+ return mResidencyManager.get();
+ }
+
ResultOrError<CommandRecordingContext*> Device::GetPendingCommandContext() {
// Callers of GetPendingCommandList do so to record commands. Only reserve a command
// allocator when it is needed so we don't submit empty command lists
@@ -205,7 +199,7 @@ namespace dawn_native { namespace d3d12 {
mResourceAllocatorManager->Tick(mCompletedSerial);
DAWN_TRY(mCommandAllocatorManager->Tick(mCompletedSerial));
- mDescriptorHeapAllocator->Deallocate(mCompletedSerial);
+ mShaderVisibleDescriptorAllocator->Tick(mCompletedSerial);
mMapRequestTracker->Tick(mCompletedSerial);
mUsedComObjectRefs.ClearUpTo(mCompletedSerial);
DAWN_TRY(ExecutePendingCommandContext());
@@ -234,12 +228,12 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError Device::ExecutePendingCommandContext() {
- return mPendingCommands.ExecuteCommandList(mCommandQueue.Get());
+ return mPendingCommands.ExecuteCommandList(this);
}
ResultOrError<BindGroupBase*> Device::CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) {
- return new BindGroup(this, descriptor);
+ return BindGroup::Create(this, descriptor);
}
ResultOrError<BindGroupLayoutBase*> Device::CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) {
@@ -256,7 +250,7 @@ namespace dawn_native { namespace d3d12 {
}
ResultOrError<ComputePipelineBase*> Device::CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) {
- return new ComputePipeline(this, descriptor);
+ return ComputePipeline::Create(this, descriptor);
}
ResultOrError<PipelineLayoutBase*> Device::CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) {
@@ -280,6 +274,12 @@ namespace dawn_native { namespace d3d12 {
const SwapChainDescriptor* descriptor) {
return new SwapChain(this, descriptor);
}
+ ResultOrError<NewSwapChainBase*> Device::CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ return DAWN_VALIDATION_ERROR("New swapchains not implemented.");
+ }
ResultOrError<TextureBase*> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
return Texture::Create(this, descriptor);
}
@@ -304,12 +304,13 @@ namespace dawn_native { namespace d3d12 {
CommandRecordingContext* commandRecordingContext;
DAWN_TRY_ASSIGN(commandRecordingContext, GetPendingCommandContext());
- ToBackend(destination)
- ->TransitionUsageNow(commandRecordingContext, wgpu::BufferUsage::CopyDst);
+ Buffer* dstBuffer = ToBackend(destination);
+ StagingBuffer* srcBuffer = ToBackend(source);
+ dstBuffer->TrackUsageAndTransitionNow(commandRecordingContext, wgpu::BufferUsage::CopyDst);
commandRecordingContext->GetCommandList()->CopyBufferRegion(
- ToBackend(destination)->GetD3D12Resource().Get(), destinationOffset,
- ToBackend(source)->GetResource(), sourceOffset, size);
+ dstBuffer->GetD3D12Resource().Get(), destinationOffset, srcBuffer->GetResource(),
+ sourceOffset, size);
return {};
}
@@ -326,11 +327,13 @@ namespace dawn_native { namespace d3d12 {
initialUsage);
}
- TextureBase* Device::WrapSharedHandle(const TextureDescriptor* descriptor,
+ TextureBase* Device::WrapSharedHandle(const ExternalImageDescriptor* descriptor,
HANDLE sharedHandle,
- uint64_t acquireMutexKey) {
+ uint64_t acquireMutexKey,
+ bool isSwapChainTexture) {
TextureBase* dawnTexture;
- if (ConsumedError(Texture::Create(this, descriptor, sharedHandle, acquireMutexKey),
+ if (ConsumedError(Texture::Create(this, descriptor, sharedHandle, acquireMutexKey,
+ isSwapChainTexture),
&dawnTexture))
return nullptr;
@@ -415,6 +418,52 @@ namespace dawn_native { namespace d3d12 {
const bool useResourceHeapTier2 = (GetDeviceInfo().resourceHeapTier >= 2);
SetToggle(Toggle::UseD3D12ResourceHeapTier2, useResourceHeapTier2);
SetToggle(Toggle::UseD3D12RenderPass, GetDeviceInfo().supportsRenderPass);
+ SetToggle(Toggle::UseD3D12ResidencyManagement, false);
+
+ // By default use the maximum shader-visible heap size allowed.
+ SetToggle(Toggle::UseD3D12SmallShaderVisibleHeapForTesting, false);
}
+ MaybeError Device::WaitForIdleForDestruction() {
+ // Immediately forget about all pending commands
+ mPendingCommands.Release();
+
+ DAWN_TRY(NextSerial());
+ // Wait for all in-flight commands to finish executing
+ DAWN_TRY(WaitForSerial(mLastSubmittedSerial));
+
+ // Call tick one last time so resources are cleaned up.
+ DAWN_TRY(TickImpl());
+
+ return {};
+ }
+
+ void Device::Destroy() {
+ ASSERT(mLossStatus != LossStatus::AlreadyLost);
+
+ // Immediately forget about all pending commands
+ mPendingCommands.Release();
+
+ // Free services explicitly so that they can free D3D12 resources before destruction of the
+ // device.
+ mDynamicUploader = nullptr;
+
+ // GPU is no longer executing commands. Existing objects do not get freed until the device
+ // is destroyed. To ensure objects are always released, force the completed serial to be
+ // MAX.
+ mCompletedSerial = std::numeric_limits<Serial>::max();
+
+ if (mFenceEvent != nullptr) {
+ ::CloseHandle(mFenceEvent);
+ }
+
+ mUsedComObjectRefs.ClearUpTo(mCompletedSerial);
+
+ ASSERT(mUsedComObjectRefs.Empty());
+ ASSERT(!mPendingCommands.IsOpen());
+ }
+
+ ShaderVisibleDescriptorAllocator* Device::GetShaderVisibleDescriptorAllocator() const {
+ return mShaderVisibleDescriptorAllocator.get();
+ }
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
index 2740e039bd2..1f0c42f7cc8 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
@@ -30,9 +30,11 @@ namespace dawn_native { namespace d3d12 {
class CommandAllocatorManager;
class DescriptorHeapAllocator;
+ class ShaderVisibleDescriptorAllocator;
class MapRequestTracker;
class PlatformFunctions;
class ResourceAllocatorManager;
+ class ResidencyManager;
#define ASSERT_SUCCESS(hr) \
{ \
@@ -57,6 +59,7 @@ namespace dawn_native { namespace d3d12 {
ComPtr<ID3D12Device> GetD3D12Device() const;
ComPtr<ID3D12CommandQueue> GetCommandQueue() const;
+ ID3D12SharingContract* GetSharingContract() const;
ComPtr<ID3D12CommandSignature> GetDispatchIndirectSignature() const;
ComPtr<ID3D12CommandSignature> GetDrawIndirectSignature() const;
@@ -65,6 +68,7 @@ namespace dawn_native { namespace d3d12 {
DescriptorHeapAllocator* GetDescriptorHeapAllocator() const;
MapRequestTracker* GetMapRequestTracker() const;
CommandAllocatorManager* GetCommandAllocatorManager() const;
+ ResidencyManager* GetResidencyManager() const;
const PlatformFunctions* GetFunctions() const;
ComPtr<IDXGIFactory4> GetFactory() const;
@@ -95,9 +99,12 @@ namespace dawn_native { namespace d3d12 {
void DeallocateMemory(ResourceHeapAllocation& allocation);
- TextureBase* WrapSharedHandle(const TextureDescriptor* descriptor,
+ ShaderVisibleDescriptorAllocator* GetShaderVisibleDescriptorAllocator() const;
+
+ TextureBase* WrapSharedHandle(const ExternalImageDescriptor* descriptor,
HANDLE sharedHandle,
- uint64_t acquireMutexKey);
+ uint64_t acquireMutexKey,
+ bool isSwapChainTexture);
ResultOrError<ComPtr<IDXGIKeyedMutex>> CreateKeyedMutexForTexture(
ID3D12Resource* d3d12Resource);
void ReleaseKeyedMutexForTexture(ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex);
@@ -122,11 +129,18 @@ namespace dawn_native { namespace d3d12 {
const ShaderModuleDescriptor* descriptor) override;
ResultOrError<SwapChainBase*> CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) override;
+ ResultOrError<NewSwapChainBase*> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) override;
ResultOrError<TextureBase*> CreateTextureImpl(const TextureDescriptor* descriptor) override;
ResultOrError<TextureViewBase*> CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) override;
+ void Destroy() override;
+ MaybeError WaitForIdleForDestruction() override;
+
Serial mCompletedSerial = 0;
Serial mLastSubmittedSerial = 0;
ComPtr<ID3D12Fence> mFence;
@@ -134,6 +148,7 @@ namespace dawn_native { namespace d3d12 {
ComPtr<ID3D12Device> mD3d12Device; // Device is owned by adapter and will not be outlived.
ComPtr<ID3D12CommandQueue> mCommandQueue;
+ ComPtr<ID3D12SharingContract> mD3d12SharingContract;
// 11on12 device and device context corresponding to mCommandQueue
ComPtr<ID3D11On12Device> mD3d11On12Device;
@@ -151,8 +166,8 @@ namespace dawn_native { namespace d3d12 {
std::unique_ptr<DescriptorHeapAllocator> mDescriptorHeapAllocator;
std::unique_ptr<MapRequestTracker> mMapRequestTracker;
std::unique_ptr<ResourceAllocatorManager> mResourceAllocatorManager;
-
- dawn_native::PCIInfo mPCIInfo;
+ std::unique_ptr<ResidencyManager> mResidencyManager;
+ std::unique_ptr<ShaderVisibleDescriptorAllocator> mShaderVisibleDescriptorAllocator;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h b/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h
index ade12e3ac86..1143a4a5acb 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/Forward.h
@@ -26,6 +26,7 @@ namespace dawn_native { namespace d3d12 {
class CommandBuffer;
class ComputePipeline;
class Device;
+ class Heap;
class PipelineLayout;
class Queue;
class RenderPipeline;
@@ -47,6 +48,7 @@ namespace dawn_native { namespace d3d12 {
using PipelineLayoutType = PipelineLayout;
using QueueType = Queue;
using RenderPipelineType = RenderPipeline;
+ using ResourceHeapType = Heap;
using SamplerType = Sampler;
using ShaderModuleType = ShaderModule;
using StagingBufferType = StagingBuffer;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.cpp
index e16f380110b..ced2dd1c5b0 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.cpp
@@ -16,6 +16,7 @@
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/HeapD3D12.h"
+#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -41,12 +42,22 @@ namespace dawn_native { namespace d3d12 {
heapDesc.Alignment = D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT;
heapDesc.Flags = mHeapFlags;
- ComPtr<ID3D12Heap> heap;
+ // CreateHeap will implicitly make the created heap resident. We must ensure enough free
+ // memory exists before allocating to avoid an out-of-memory error when overcommitted.
+ DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanMakeResident(size));
+
+ ComPtr<ID3D12Heap> d3d12Heap;
DAWN_TRY(CheckOutOfMemoryHRESULT(
- mDevice->GetD3D12Device()->CreateHeap(&heapDesc, IID_PPV_ARGS(&heap)),
+ mDevice->GetD3D12Device()->CreateHeap(&heapDesc, IID_PPV_ARGS(&d3d12Heap)),
"ID3D12Device::CreateHeap"));
- return {std::make_unique<Heap>(std::move(heap))};
+ std::unique_ptr<ResourceHeapBase> heapBase =
+ std::make_unique<Heap>(std::move(d3d12Heap), heapDesc.Properties.Type, size);
+
+ // Calling CreateHeap implicitly calls MakeResident on the new heap. We must track this to
+ // avoid calling MakeResident a second time.
+ mDevice->GetResidencyManager()->TrackResidentAllocation(ToBackend(heapBase.get()));
+ return heapBase;
}
void HeapAllocator::DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> heap) {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp
index 2e35bdf7cf7..112702b87f2 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp
@@ -15,11 +15,77 @@
#include "dawn_native/d3d12/HeapD3D12.h"
namespace dawn_native { namespace d3d12 {
+ Heap::Heap(ComPtr<ID3D12Pageable> d3d12Pageable, D3D12_HEAP_TYPE d3d12HeapType, uint64_t size)
+ : mD3d12Pageable(std::move(d3d12Pageable)), mD3d12HeapType(d3d12HeapType), mSize(size) {
+ }
- Heap::Heap(ComPtr<ID3D12Heap> heap) : mHeap(std::move(heap)) {
+ Heap::~Heap() {
+ // When a heap is destroyed, it no longer resides in resident memory, so we must evict it
+ // from the LRU cache. If this heap is not manually removed from the LRU-cache, the
+ // ResidencyManager will attempt to use it after it has been deallocated.
+ if (IsInResidencyLRUCache()) {
+ RemoveFromList();
+ }
}
+ // This function should only be used when mD3D12Pageable was initialized from a ID3D12Pageable
+ // that was initially created as an ID3D12Heap (i.e. SubAllocation). If the ID3D12Pageable was
+ // initially created as an ID3D12Resource (i.e. DirectAllocation), then use GetD3D12Pageable().
ComPtr<ID3D12Heap> Heap::GetD3D12Heap() const {
- return mHeap;
+ ComPtr<ID3D12Heap> heap;
+ HRESULT result = mD3d12Pageable.As(&heap);
+ ASSERT(SUCCEEDED(result));
+ return heap;
+ }
+
+ ComPtr<ID3D12Pageable> Heap::GetD3D12Pageable() const {
+ return mD3d12Pageable;
+ }
+
+ D3D12_HEAP_TYPE Heap::GetD3D12HeapType() const {
+ return mD3d12HeapType;
+ }
+
+ Serial Heap::GetLastUsage() const {
+ return mLastUsage;
+ }
+
+ void Heap::SetLastUsage(Serial serial) {
+ mLastUsage = serial;
+ }
+
+ uint64_t Heap::GetLastSubmission() const {
+ return mLastSubmission;
+ }
+
+ void Heap::SetLastSubmission(Serial serial) {
+ mLastSubmission = serial;
}
+
+ uint64_t Heap::GetSize() const {
+ return mSize;
+ }
+
+ bool Heap::IsInResidencyLRUCache() const {
+ return IsInList();
+ }
+
+ void Heap::IncrementResidencyLock() {
+ ASSERT(mD3d12HeapType != D3D12_HEAP_TYPE_DEFAULT);
+ mResidencyLockRefCount++;
+ }
+
+ void Heap::DecrementResidencyLock() {
+ ASSERT(mD3d12HeapType != D3D12_HEAP_TYPE_DEFAULT);
+ mResidencyLockRefCount--;
+ }
+
+ bool Heap::IsResidencyLocked() const {
+ if (mResidencyLockRefCount == 0) {
+ return false;
+ }
+
+ return true;
+ }
+
}} // namespace dawn_native::d3d12 \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h
index 834e42ac9fb..de1e205907f 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h
@@ -15,20 +15,59 @@
#ifndef DAWNNATIVE_D3D12_HEAPD3D12_H_
#define DAWNNATIVE_D3D12_HEAPD3D12_H_
+#include "common/LinkedList.h"
+#include "common/Serial.h"
#include "dawn_native/ResourceHeap.h"
#include "dawn_native/d3d12/d3d12_platform.h"
namespace dawn_native { namespace d3d12 {
- class Heap : public ResourceHeapBase {
+ // This class is used to represent heap allocations, but also serves as a node within the
+ // ResidencyManager's LRU cache. This node is inserted into the LRU-cache when it is first
+ // allocated, and any time it is scheduled to be used by the GPU. This node is removed from the
+ // LRU cache when it is evicted from resident memory due to budget constraints, or when the heap
+ // is destroyed.
+ class Heap : public ResourceHeapBase, public LinkNode<Heap> {
public:
- Heap(ComPtr<ID3D12Heap> heap);
- ~Heap() = default;
+ Heap(ComPtr<ID3D12Pageable> d3d12Pageable, D3D12_HEAP_TYPE heapType, uint64_t size);
+ ~Heap();
ComPtr<ID3D12Heap> GetD3D12Heap() const;
+ ComPtr<ID3D12Pageable> GetD3D12Pageable() const;
+ D3D12_HEAP_TYPE GetD3D12HeapType() const;
+
+ // We set mLastRecordingSerial to denote the serial this heap was last recorded to be used.
+ // We must check this serial against the current serial when recording heap usages to ensure
+ // we do not process residency for this heap multiple times.
+ Serial GetLastUsage() const;
+ void SetLastUsage(Serial serial);
+
+ // The residency manager must know the last serial that any portion of the heap was
+ // submitted to be used so that we can ensure this heap stays resident in memory at least
+ // until that serial has completed.
+ uint64_t GetLastSubmission() const;
+ void SetLastSubmission(Serial serial);
+
+ uint64_t GetSize() const;
+
+ bool IsInResidencyLRUCache() const;
+
+ // In some scenarios, such as async buffer mapping, we must lock residency to ensure the
+ // heap cannot be evicted. Because multiple buffers may be mapped in a single heap, we must
+ // track the number of resources currently locked.
+ void IncrementResidencyLock();
+ void DecrementResidencyLock();
+ bool IsResidencyLocked() const;
private:
- ComPtr<ID3D12Heap> mHeap;
+ ComPtr<ID3D12Pageable> mD3d12Pageable;
+ D3D12_HEAP_TYPE mD3d12HeapType;
+ // mLastUsage denotes the last time this heap was recorded for use.
+ Serial mLastUsage = 0;
+ // mLastSubmission denotes the last time this heap was submitted to the GPU.
+ Serial mLastSubmission = 0;
+ uint32_t mResidencyLockRefCount = 0;
+ uint64_t mSize = 0;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
index 2cdd6cf1db7..868e0833470 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
@@ -46,10 +46,13 @@ namespace dawn_native { namespace d3d12 {
return D3D12_ROOT_PARAMETER_TYPE_CBV;
case wgpu::BindingType::StorageBuffer:
return D3D12_ROOT_PARAMETER_TYPE_UAV;
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ return D3D12_ROOT_PARAMETER_TYPE_SRV;
case wgpu::BindingType::SampledTexture:
case wgpu::BindingType::Sampler:
case wgpu::BindingType::StorageTexture:
- case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
UNREACHABLE();
}
}
@@ -86,7 +89,6 @@ namespace dawn_native { namespace d3d12 {
for (uint32_t group : IterateBitSet(GetBindGroupLayoutsMask())) {
const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
- const BindGroupLayout::LayoutBindingInfo& groupInfo = bindGroupLayout->GetBindingInfo();
// Set the root descriptor table parameter and copy ranges. Ranges are offset by the
// bind group index Returns whether or not the parameter was set. A root parameter is
@@ -126,25 +128,30 @@ namespace dawn_native { namespace d3d12 {
// Get calculated shader register for root descriptors
const auto& shaderRegisters = bindGroupLayout->GetBindingOffsets();
- // Init root descriptors in root signatures.
- for (uint32_t dynamicBinding : IterateBitSet(groupInfo.hasDynamicOffset)) {
+ // Init root descriptors in root signatures for dynamic buffer bindings.
+ // These are packed at the beginning of the layout binding info.
+ for (BindingIndex dynamicBindingIndex = 0;
+ dynamicBindingIndex < bindGroupLayout->GetDynamicBufferCount();
+ ++dynamicBindingIndex) {
+ const BindingInfo& bindingInfo =
+ bindGroupLayout->GetBindingInfo(dynamicBindingIndex);
+
D3D12_ROOT_PARAMETER* rootParameter = &rootParameters[parameterIndex];
// Setup root descriptor.
D3D12_ROOT_DESCRIPTOR rootDescriptor;
- rootDescriptor.ShaderRegister = shaderRegisters[dynamicBinding];
+ rootDescriptor.ShaderRegister = shaderRegisters[dynamicBindingIndex];
rootDescriptor.RegisterSpace = group;
// Set root descriptors in root signatures.
rootParameter->Descriptor = rootDescriptor;
- mDynamicRootParameterIndices[group][dynamicBinding] = parameterIndex++;
+ mDynamicRootParameterIndices[group][dynamicBindingIndex] = parameterIndex++;
// Set parameter types according to bind group layout descriptor.
- rootParameter->ParameterType = RootParameterType(groupInfo.types[dynamicBinding]);
+ rootParameter->ParameterType = RootParameterType(bindingInfo.type);
// Set visibilities according to bind group layout descriptor.
- rootParameter->ShaderVisibility =
- ShaderVisibilityType(groupInfo.visibilities[dynamicBinding]);
+ rootParameter->ShaderVisibility = ShaderVisibilityType(bindingInfo.visibility);
}
}
@@ -183,10 +190,11 @@ namespace dawn_native { namespace d3d12 {
return mRootSignature;
}
- uint32_t PipelineLayout::GetDynamicRootParameterIndex(uint32_t group, uint32_t binding) const {
+ uint32_t PipelineLayout::GetDynamicRootParameterIndex(uint32_t group,
+ BindingIndex bindingIndex) const {
ASSERT(group < kMaxBindGroups);
- ASSERT(binding < kMaxBindingsPerGroup);
- ASSERT(GetBindGroupLayout(group)->GetBindingInfo().hasDynamicOffset[binding]);
- return mDynamicRootParameterIndices[group][binding];
+ ASSERT(bindingIndex < kMaxBindingsPerGroup);
+ ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).hasDynamicOffset);
+ return mDynamicRootParameterIndices[group][bindingIndex];
}
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
index 1d6c7e56b9d..5b711377621 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
@@ -15,8 +15,8 @@
#ifndef DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
#define DAWNNATIVE_D3D12_PIPELINELAYOUTD3D12_H_
+#include "dawn_native/BindingInfo.h"
#include "dawn_native/PipelineLayout.h"
-
#include "dawn_native/d3d12/d3d12_platform.h"
namespace dawn_native { namespace d3d12 {
@@ -32,7 +32,7 @@ namespace dawn_native { namespace d3d12 {
uint32_t GetSamplerRootParameterIndex(uint32_t group) const;
// Returns the index of the root parameter reserved for a dynamic buffer binding
- uint32_t GetDynamicRootParameterIndex(uint32_t group, uint32_t binding) const;
+ uint32_t GetDynamicRootParameterIndex(uint32_t group, BindingIndex bindingIndex) const;
ComPtr<ID3D12RootSignature> GetRootSignature() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp
index ae25dc1934a..1e79f88c930 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp
@@ -45,7 +45,7 @@ namespace dawn_native { namespace d3d12 {
"D3D12SerializeVersionedRootSignature", &error) ||
!mD3D12Lib.GetProc(&d3d12CreateVersionedRootSignatureDeserializer,
"D3D12CreateVersionedRootSignatureDeserializer", &error)) {
- return DAWN_DEVICE_LOST_ERROR(error.c_str());
+ return DAWN_INTERNAL_ERROR(error.c_str());
}
return {};
@@ -55,7 +55,7 @@ namespace dawn_native { namespace d3d12 {
std::string error;
if (!mD3D11Lib.Open("d3d11.dll", &error) ||
!mD3D11Lib.GetProc(&d3d11on12CreateDevice, "D3D11On12CreateDevice", &error)) {
- return DAWN_DEVICE_LOST_ERROR(error.c_str());
+ return DAWN_INTERNAL_ERROR(error.c_str());
}
return {};
@@ -66,7 +66,7 @@ namespace dawn_native { namespace d3d12 {
if (!mDXGILib.Open("dxgi.dll", &error) ||
!mDXGILib.GetProc(&dxgiGetDebugInterface1, "DXGIGetDebugInterface1", &error) ||
!mDXGILib.GetProc(&createDxgiFactory2, "CreateDXGIFactory2", &error)) {
- return DAWN_DEVICE_LOST_ERROR(error.c_str());
+ return DAWN_INTERNAL_ERROR(error.c_str());
}
return {};
@@ -76,7 +76,7 @@ namespace dawn_native { namespace d3d12 {
std::string error;
if (!mD3DCompilerLib.Open("d3dcompiler_47.dll", &error) ||
!mD3DCompilerLib.GetProc(&d3dCompile, "D3DCompile", &error)) {
- return DAWN_DEVICE_LOST_ERROR(error.c_str());
+ return DAWN_INTERNAL_ERROR(error.c_str());
}
return {};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
index c563c5028bd..710e41f052c 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
@@ -36,7 +36,7 @@ namespace dawn_native { namespace d3d12 {
TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording,
"CommandBufferD3D12::RecordCommands");
for (uint32_t i = 0; i < commandCount; ++i) {
- DAWN_TRY(ToBackend(commands[i])->RecordCommands(commandContext, i));
+ DAWN_TRY(ToBackend(commands[i])->RecordCommands(commandContext));
}
TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording,
"CommandBufferD3D12::RecordCommands");
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
index 89c9ed514ce..830860be8e6 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
@@ -337,7 +337,8 @@ namespace dawn_native { namespace d3d12 {
break;
}
- const std::string hlslSource = module->GetHLSLSource(ToBackend(GetLayout()));
+ std::string hlslSource;
+ DAWN_TRY_ASSIGN(hlslSource, module->GetHLSLSource(ToBackend(GetLayout())));
const PlatformFunctions* functions = device->GetFunctions();
if (FAILED(functions->d3dCompile(hlslSource.c_str(), hlslSource.length(), nullptr,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h
index affd5fe7336..73408720c67 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.h
@@ -27,6 +27,7 @@ namespace dawn_native { namespace d3d12 {
public:
static ResultOrError<RenderPipeline*> Create(Device* device,
const RenderPipelineDescriptor* descriptor);
+ RenderPipeline() = delete;
~RenderPipeline();
D3D12_PRIMITIVE_TOPOLOGY GetD3D12PrimitiveTopology() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp
new file mode 100644
index 00000000000..3970fdc589a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp
@@ -0,0 +1,278 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
+
+#include "dawn_native/d3d12/AdapterD3D12.h"
+#include "dawn_native/d3d12/D3D12Error.h"
+#include "dawn_native/d3d12/DeviceD3D12.h"
+#include "dawn_native/d3d12/Forward.h"
+#include "dawn_native/d3d12/HeapD3D12.h"
+
+#include "dawn_native/d3d12/d3d12_platform.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ ResidencyManager::ResidencyManager(Device* device)
+ : mDevice(device),
+ mResidencyManagementEnabled(
+ device->IsToggleEnabled(Toggle::UseD3D12ResidencyManagement)) {
+ UpdateVideoMemoryInfo();
+ }
+
+ // Increments number of locks on a heap to ensure the heap remains resident.
+ MaybeError ResidencyManager::LockMappableHeap(Heap* heap) {
+ if (!mResidencyManagementEnabled) {
+ return {};
+ }
+
+ // Depending on device architecture, the heap may not need tracked.
+ if (!ShouldTrackHeap(heap)) {
+ return {};
+ }
+
+ // If the heap isn't already resident, make it resident.
+ if (!heap->IsInResidencyLRUCache() && !heap->IsResidencyLocked()) {
+ DAWN_TRY(EnsureCanMakeResident(heap->GetSize()));
+ ID3D12Pageable* pageable = heap->GetD3D12Pageable().Get();
+ DAWN_TRY(
+ CheckHRESULT(mDevice->GetD3D12Device()->MakeResident(1, &pageable),
+ "Making a heap resident due to an underlying resource being mapped."));
+ }
+
+ // Since we can't evict the heap, it's unnecessary to track the heap in the LRU Cache.
+ if (heap->IsInResidencyLRUCache()) {
+ heap->RemoveFromList();
+ }
+
+ heap->IncrementResidencyLock();
+
+ return {};
+ }
+
+ // Decrements number of locks on a heap. When the number of locks becomes zero, the heap is
+ // inserted into the LRU cache and becomes eligible for eviction.
+ void ResidencyManager::UnlockMappableHeap(Heap* heap) {
+ if (!mResidencyManagementEnabled) {
+ return;
+ }
+
+ // Depending on device architecture, the heap may not need tracked.
+ if (!ShouldTrackHeap(heap)) {
+ return;
+ }
+
+ ASSERT(heap->IsResidencyLocked());
+ ASSERT(!heap->IsInResidencyLRUCache());
+ heap->DecrementResidencyLock();
+
+ // When all locks have been removed, the resource remains resident and becomes tracked in
+ // the LRU.
+ if (!heap->IsResidencyLocked()) {
+ mLRUCache.Append(heap);
+ }
+ }
+
+ // Allows an application component external to Dawn to cap Dawn's residency budget to prevent
+ // competition for device local memory. Returns the amount of memory reserved, which may be less
+ // that the requested reservation when under pressure.
+ uint64_t ResidencyManager::SetExternalMemoryReservation(uint64_t requestedReservationSize) {
+ mVideoMemoryInfo.externalRequest = requestedReservationSize;
+ UpdateVideoMemoryInfo();
+ return mVideoMemoryInfo.externalReservation;
+ }
+
+ void ResidencyManager::UpdateVideoMemoryInfo() {
+ if (!mResidencyManagementEnabled) {
+ return;
+ }
+
+ DXGI_QUERY_VIDEO_MEMORY_INFO queryVideoMemoryInfo;
+ ToBackend(mDevice->GetAdapter())
+ ->GetHardwareAdapter()
+ ->QueryVideoMemoryInfo(0, DXGI_MEMORY_SEGMENT_GROUP_LOCAL, &queryVideoMemoryInfo);
+
+ // The video memory budget provided by QueryVideoMemoryInfo is defined by the operating
+ // system, and may be lower than expected in certain scenarios. Under memory pressure, we
+ // cap the external reservation to half the available budget, which prevents the external
+ // component from consuming a disproportionate share of memory and ensures that Dawn can
+ // continue to make forward progress. Note the choice to halve memory is arbitrarily chosen
+ // and subject to future experimentation.
+ mVideoMemoryInfo.externalReservation =
+ std::min(queryVideoMemoryInfo.Budget / 2, mVideoMemoryInfo.externalReservation);
+
+ // We cap Dawn's budget to 95% of the provided budget. Leaving some budget unused
+ // decreases fluctuations in the operating-system-defined budget, which improves stability
+ // for both Dawn and other applications on the system. Note the value of 95% is arbitrarily
+ // chosen and subject to future experimentation.
+ static constexpr float kBudgetCap = 0.95;
+ mVideoMemoryInfo.dawnBudget =
+ (queryVideoMemoryInfo.Budget - mVideoMemoryInfo.externalReservation) * kBudgetCap;
+ mVideoMemoryInfo.dawnUsage =
+ queryVideoMemoryInfo.CurrentUsage - mVideoMemoryInfo.externalReservation;
+ }
+
+ // Removes from the LRU and returns the least recently used heap when possible. Returns nullptr
+ // when nothing further can be evicted.
+ ResultOrError<Heap*> ResidencyManager::RemoveSingleEntryFromLRU() {
+ ASSERT(!mLRUCache.empty());
+ Heap* heap = mLRUCache.head()->value();
+ Serial lastSubmissionSerial = heap->GetLastSubmission();
+
+ // If the next candidate for eviction was inserted into the LRU during the current serial,
+ // it is because more memory is being used in a single command list than is available.
+ // In this scenario, we cannot make any more resources resident and thrashing must occur.
+ if (lastSubmissionSerial == mDevice->GetPendingCommandSerial()) {
+ return nullptr;
+ }
+
+ // We must ensure that any previous use of a resource has completed before the resource can
+ // be evicted.
+ if (lastSubmissionSerial > mDevice->GetCompletedCommandSerial()) {
+ DAWN_TRY(mDevice->WaitForSerial(lastSubmissionSerial));
+ }
+
+ heap->RemoveFromList();
+ return heap;
+ }
+
+ // Any time we need to make something resident in local memory, we must check that we have
+ // enough free memory to make the new object resident while also staying within our budget.
+ // If there isn't enough memory, we should evict until there is.
+ MaybeError ResidencyManager::EnsureCanMakeResident(uint64_t sizeToMakeResident) {
+ if (!mResidencyManagementEnabled) {
+ return {};
+ }
+
+ UpdateVideoMemoryInfo();
+
+ uint64_t memoryUsageAfterMakeResident = sizeToMakeResident + mVideoMemoryInfo.dawnUsage;
+
+ // Return when we can call MakeResident and remain under budget.
+ if (memoryUsageAfterMakeResident < mVideoMemoryInfo.dawnBudget) {
+ return {};
+ }
+
+ std::vector<ID3D12Pageable*> resourcesToEvict;
+
+ uint64_t sizeEvicted = 0;
+ while (sizeEvicted < sizeToMakeResident) {
+ Heap* heap;
+ DAWN_TRY_ASSIGN(heap, RemoveSingleEntryFromLRU());
+
+ // If no heap was returned, then nothing more can be evicted.
+ if (heap == nullptr) {
+ break;
+ }
+
+ sizeEvicted += heap->GetSize();
+ resourcesToEvict.push_back(heap->GetD3D12Pageable().Get());
+ }
+
+ if (resourcesToEvict.size() > 0) {
+ DAWN_TRY(CheckHRESULT(
+ mDevice->GetD3D12Device()->Evict(resourcesToEvict.size(), resourcesToEvict.data()),
+ "Evicting resident heaps to free device local memory"));
+ }
+
+ return {};
+ }
+
+ // Ensure that we are only tracking heaps that exist in DXGI_MEMORY_SEGMENT_LOCAL.
+ bool ResidencyManager::ShouldTrackHeap(Heap* heap) const {
+ D3D12_HEAP_PROPERTIES heapProperties =
+ mDevice->GetD3D12Device()->GetCustomHeapProperties(0, heap->GetD3D12HeapType());
+
+ if (mDevice->GetDeviceInfo().isUMA) {
+ // On UMA devices, MEMORY_POOL_L0 corresponds to MEMORY_SEGMENT_LOCAL, so we must track
+ // heaps in MEMORY_POOL_L0. For UMA, all heaps types exist in MEMORY_POOL_L0.
+ return heapProperties.MemoryPoolPreference == D3D12_MEMORY_POOL_L0;
+ }
+
+ // On non-UMA devices, MEMORY_POOL_L1 corresponds to MEMORY_SEGMENT_LOCAL, so only track the
+ // heap if it is in MEMORY_POOL_L1. For non-UMA, DEFAULT heaps exist in MEMORY_POOL_L1,
+ // while READBACK and UPLOAD heaps exist in MEMORY_POOL_L0.
+ return heapProperties.MemoryPoolPreference == D3D12_MEMORY_POOL_L1;
+ }
+
+ // Given a list of heaps that are pending usage, this function will estimate memory needed,
+ // evict resources until enough space is available, then make resident any heaps scheduled for
+ // usage.
+ MaybeError ResidencyManager::EnsureHeapsAreResident(Heap** heaps, size_t heapCount) {
+ if (!mResidencyManagementEnabled) {
+ return {};
+ }
+
+ std::vector<ID3D12Pageable*> heapsToMakeResident;
+ uint64_t sizeToMakeResident = 0;
+
+ Serial pendingCommandSerial = mDevice->GetPendingCommandSerial();
+ for (size_t i = 0; i < heapCount; i++) {
+ Heap* heap = heaps[i];
+
+ // Depending on device architecture, the heap may not need tracked.
+ if (!ShouldTrackHeap(heap)) {
+ continue;
+ }
+
+ // Heaps that are locked resident are not tracked in the LRU cache.
+ if (heap->IsResidencyLocked()) {
+ continue;
+ }
+
+ if (heap->IsInResidencyLRUCache()) {
+ // If the heap is already in the LRU, we must remove it and append again below to
+ // update its position in the LRU.
+ heap->RemoveFromList();
+ } else {
+ heapsToMakeResident.push_back(heap->GetD3D12Pageable().Get());
+ sizeToMakeResident += heap->GetSize();
+ }
+
+ mLRUCache.Append(heap);
+ heap->SetLastSubmission(pendingCommandSerial);
+ }
+
+ if (heapsToMakeResident.size() != 0) {
+ DAWN_TRY(EnsureCanMakeResident(sizeToMakeResident));
+
+ // Note that MakeResident is a synchronous function and can add a significant
+ // overhead to command recording. In the future, it may be possible to decrease this
+ // overhead by using MakeResident on a secondary thread, or by instead making use of
+ // the EnqueueMakeResident function (which is not available on all Windows 10
+ // platforms).
+ DAWN_TRY(CheckHRESULT(mDevice->GetD3D12Device()->MakeResident(
+ heapsToMakeResident.size(), heapsToMakeResident.data()),
+ "Making scheduled-to-be-used resources resident in "
+ "device local memory"));
+ }
+
+ return {};
+ }
+
+ // When a new heap is allocated, the heap will be made resident upon creation. We must track
+ // when this happens to avoid calling MakeResident a second time.
+ void ResidencyManager::TrackResidentAllocation(Heap* heap) {
+ if (!mResidencyManagementEnabled) {
+ return;
+ }
+
+ // Depending on device architecture and heap type, the heap may not need tracked.
+ if (!ShouldTrackHeap(heap)) {
+ return;
+ }
+
+ mLRUCache.Append(heap);
+ }
+}} // namespace dawn_native::d3d12 \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h
new file mode 100644
index 00000000000..71c7fae5895
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h
@@ -0,0 +1,60 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_RESIDENCYMANAGERD3D12_H_
+#define DAWNNATIVE_D3D12_RESIDENCYMANAGERD3D12_H_
+
+#include "common/LinkedList.h"
+#include "common/Serial.h"
+#include "dawn_native/Error.h"
+#include "dawn_native/dawn_platform.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ class Device;
+ class Heap;
+
+ class ResidencyManager {
+ public:
+ ResidencyManager(Device* device);
+
+ MaybeError LockMappableHeap(Heap* heap);
+ void UnlockMappableHeap(Heap* heap);
+ MaybeError EnsureCanMakeResident(uint64_t allocationSize);
+ MaybeError EnsureHeapsAreResident(Heap** heaps, size_t heapCount);
+
+ uint64_t SetExternalMemoryReservation(uint64_t requestedReservationSize);
+
+ void TrackResidentAllocation(Heap* heap);
+
+ private:
+ struct VideoMemoryInfo {
+ uint64_t dawnBudget;
+ uint64_t dawnUsage;
+ uint64_t externalReservation;
+ uint64_t externalRequest;
+ };
+ ResultOrError<Heap*> RemoveSingleEntryFromLRU();
+ bool ShouldTrackHeap(Heap* heap) const;
+ void UpdateVideoMemoryInfo();
+
+ Device* mDevice;
+ LinkedList<Heap> mLRUCache;
+ bool mResidencyManagementEnabled = false;
+ VideoMemoryInfo mVideoMemoryInfo = {};
+ };
+
+}} // namespace dawn_native::d3d12
+
+#endif // DAWNNATIVE_D3D12_RESIDENCYMANAGERD3D12_H_ \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
index 5200a145c31..3176f5bd8a1 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
@@ -18,6 +18,7 @@
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/HeapAllocatorD3D12.h"
#include "dawn_native/d3d12/HeapD3D12.h"
+#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
namespace dawn_native { namespace d3d12 {
namespace {
@@ -87,7 +88,8 @@ namespace dawn_native { namespace d3d12 {
default:
UNREACHABLE();
}
- } break;
+ break;
+ }
case D3D12_RESOURCE_DIMENSION_TEXTURE1D:
case D3D12_RESOURCE_DIMENSION_TEXTURE2D:
case D3D12_RESOURCE_DIMENSION_TEXTURE3D: {
@@ -96,18 +98,41 @@ namespace dawn_native { namespace d3d12 {
if ((flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
(flags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET)) {
return Default_OnlyRenderableOrDepthTextures;
- } else {
- return Default_OnlyNonRenderableOrDepthTextures;
}
- } break;
+ return Default_OnlyNonRenderableOrDepthTextures;
+ }
+
default:
UNREACHABLE();
}
- } break;
+ break;
+ }
default:
UNREACHABLE();
}
}
+
+ uint64_t GetResourcePlacementAlignment(ResourceHeapKind resourceHeapKind,
+ uint32_t sampleCount,
+ uint64_t requestedAlignment) {
+ switch (resourceHeapKind) {
+ // Small resources can take advantage of smaller alignments. For example,
+ // if the most detailed mip can fit under 64KB, 4KB alignments can be used.
+ // Must be non-depth or without render-target to use small resource alignment.
+ // This also applies to MSAA textures (4MB => 64KB).
+ //
+ // Note: Only known to be used for small textures; however, MSDN suggests
+ // it could be extended for more cases. If so, this could default to always
+ // attempt small resource placement.
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_resource_desc
+ case Default_OnlyNonRenderableOrDepthTextures:
+ return (sampleCount > 1) ? D3D12_SMALL_MSAA_RESOURCE_PLACEMENT_ALIGNMENT
+ : D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT;
+ default:
+ return requestedAlignment;
+ }
+ }
+
} // namespace
ResourceAllocatorManager::ResourceAllocatorManager(Device* device) : mDevice(device) {
@@ -165,6 +190,13 @@ namespace dawn_native { namespace d3d12 {
mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
+ // Directly allocated ResourceHeapAllocations are created with a heap object that must be
+ // manually deleted upon deallocation. See ResourceAllocatorManager::CreateCommittedResource
+ // for more information.
+ if (allocation.GetInfo().mMethod == AllocationMethod::kDirect) {
+ delete allocation.GetResourceHeap();
+ }
+
// Invalidate the allocation immediately in case one accidentally
// calls DeallocateMemory again using the same allocation.
allocation.Invalidate();
@@ -191,38 +223,31 @@ namespace dawn_native { namespace d3d12 {
D3D12_HEAP_TYPE heapType,
const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
D3D12_RESOURCE_STATES initialUsage) {
- const size_t resourceHeapKindIndex =
+ const ResourceHeapKind resourceHeapKind =
GetResourceHeapKind(requestedResourceDescriptor.Dimension, heapType,
requestedResourceDescriptor.Flags, mResourceHeapTier);
- // Small resources can take advantage of smaller alignments. For example,
- // if the most detailed mip can fit under 64KB, 4KB alignments can be used.
- // Must be non-depth or without render-target to use small resource alignment.
- //
- // Note: Only known to be used for small textures; however, MSDN suggests
- // it could be extended for more cases. If so, this could default to always attempt small
- // resource placement.
- // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_resource_desc
D3D12_RESOURCE_DESC resourceDescriptor = requestedResourceDescriptor;
- resourceDescriptor.Alignment =
- (resourceHeapKindIndex == Default_OnlyNonRenderableOrDepthTextures)
- ? D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT
- : requestedResourceDescriptor.Alignment;
+ resourceDescriptor.Alignment = GetResourcePlacementAlignment(
+ resourceHeapKind, requestedResourceDescriptor.SampleDesc.Count,
+ requestedResourceDescriptor.Alignment);
D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
- // If the request for small resource alignment was rejected, let D3D tell us what the
+ // If the requested resource alignment was rejected, let D3D tell us what the
// required alignment is for this resource.
- if (resourceHeapKindIndex == Default_OnlyNonRenderableOrDepthTextures &&
- resourceInfo.Alignment != D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT) {
+ if (resourceDescriptor.Alignment != resourceInfo.Alignment) {
resourceDescriptor.Alignment = 0;
resourceInfo =
mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
}
+ if (resourceInfo.SizeInBytes == 0) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Resource allocation size was invalid.");
+ }
BuddyMemoryAllocator* allocator =
- mSubAllocatedResourceAllocators[resourceHeapKindIndex].get();
+ mSubAllocatedResourceAllocators[static_cast<size_t>(resourceHeapKind)].get();
ResourceMemoryAllocation allocation;
DAWN_TRY_ASSIGN(allocation,
@@ -231,7 +256,11 @@ namespace dawn_native { namespace d3d12 {
return ResourceHeapAllocation{}; // invalid
}
- ID3D12Heap* heap = static_cast<Heap*>(allocation.GetResourceHeap())->GetD3D12Heap().Get();
+ Heap* heap = ToBackend(allocation.GetResourceHeap());
+
+ // Before calling CreatePlacedResource, we must ensure the target heap is resident.
+ // CreatePlacedResource will fail if it is not.
+ DAWN_TRY(mDevice->GetResidencyManager()->EnsureHeapsAreResident(&heap, 1));
// With placed resources, a single heap can be reused.
// The resource placed at an offset is only reclaimed
@@ -241,13 +270,14 @@ namespace dawn_native { namespace d3d12 {
// barrier).
// https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
ComPtr<ID3D12Resource> placedResource;
- DAWN_TRY(CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreatePlacedResource(
- heap, allocation.GetOffset(), &resourceDescriptor,
- initialUsage, nullptr, IID_PPV_ARGS(&placedResource)),
- "ID3D12Device::CreatePlacedResource"));
+ DAWN_TRY(CheckOutOfMemoryHRESULT(
+ mDevice->GetD3D12Device()->CreatePlacedResource(
+ heap->GetD3D12Heap().Get(), allocation.GetOffset(), &resourceDescriptor,
+ initialUsage, nullptr, IID_PPV_ARGS(&placedResource)),
+ "ID3D12Device::CreatePlacedResource"));
return ResourceHeapAllocation{allocation.GetInfo(), allocation.GetOffset(),
- std::move(placedResource)};
+ std::move(placedResource), heap};
}
ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreateCommittedResource(
@@ -261,6 +291,23 @@ namespace dawn_native { namespace d3d12 {
heapProperties.CreationNodeMask = 0;
heapProperties.VisibleNodeMask = 0;
+ // If d3d tells us the resource is "zero-sized", the size is invalid and may cause a device
+ // lost (too large for driver). Instead, treat the error as a OOM.
+ D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
+ mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
+ if (resourceInfo.SizeInBytes == 0) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Resource allocation size was invalid.");
+ }
+
+ if (resourceInfo.SizeInBytes > kMaxHeapSize) {
+ return ResourceHeapAllocation{}; // Invalid
+ }
+
+ // CreateCommittedResource will implicitly make the created resource resident. We must
+ // ensure enough free memory exists before allocating to avoid an out-of-memory error when
+ // overcommitted.
+ DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanMakeResident(resourceInfo.SizeInBytes));
+
// Note: Heap flags are inferred by the resource descriptor and do not need to be explicitly
// provided to CreateCommittedResource.
ComPtr<ID3D12Resource> committedResource;
@@ -270,11 +317,22 @@ namespace dawn_native { namespace d3d12 {
initialUsage, nullptr, IID_PPV_ARGS(&committedResource)),
"ID3D12Device::CreateCommittedResource"));
+ // When using CreateCommittedResource, D3D12 creates an implicit heap that contains the
+ // resource allocation. Because Dawn's memory residency management occurs at the resource
+ // heap granularity, every directly allocated ResourceHeapAllocation also stores a Heap
+ // object. This object is created manually, and must be deleted manually upon deallocation
+ // of the committed resource.
+ Heap* heap = new Heap(committedResource, heapType, resourceInfo.SizeInBytes);
+
+ // Calling CreateCommittedResource implicitly calls MakeResident on the resource. We must
+ // track this to avoid calling MakeResident a second time.
+ mDevice->GetResidencyManager()->TrackResidentAllocation(heap);
+
AllocationInfo info;
info.mMethod = AllocationMethod::kDirect;
return ResourceHeapAllocation{info,
- /*offset*/ 0, std::move(committedResource)};
+ /*offset*/ 0, std::move(committedResource), heap};
}
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
index bf805cb8d0d..c3a89f0590d 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
@@ -14,13 +14,17 @@
#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
+#include "dawn_native/d3d12/HeapD3D12.h"
+
#include <utility>
namespace dawn_native { namespace d3d12 {
ResourceHeapAllocation::ResourceHeapAllocation(const AllocationInfo& info,
uint64_t offset,
- ComPtr<ID3D12Resource> resource)
- : ResourceMemoryAllocation(info, offset, nullptr), mResource(std::move(resource)) {
+ ComPtr<ID3D12Resource> resource,
+ Heap* heap)
+ : ResourceMemoryAllocation(info, offset, heap), mResource(std::move(resource)) {
+ ASSERT((info.mMethod == AllocationMethod::kExternal) == (heap == nullptr));
}
void ResourceHeapAllocation::Invalidate() {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
index d764a9675f2..71b00fd5fd7 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
@@ -20,12 +20,15 @@
namespace dawn_native { namespace d3d12 {
+ class Heap;
+
class ResourceHeapAllocation : public ResourceMemoryAllocation {
public:
ResourceHeapAllocation() = default;
ResourceHeapAllocation(const AllocationInfo& info,
uint64_t offset,
- ComPtr<ID3D12Resource> resource);
+ ComPtr<ID3D12Resource> resource,
+ Heap* heap);
~ResourceHeapAllocation() override = default;
void Invalidate() override;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
index 3f841d1c547..14c4f5debca 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
@@ -41,7 +41,7 @@ namespace dawn_native { namespace d3d12 {
MaybeError ShaderModule::Initialize(const ShaderModuleDescriptor* descriptor) {
mSpirv.assign(descriptor->code, descriptor->code + descriptor->codeSize);
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
- shaderc_spvc::CompileOptions options;
+ shaderc_spvc::CompileOptions options = GetCompileOptions();
options.SetHLSLShaderModel(51);
// PointCoord and PointSize are not supported in HLSL
@@ -52,29 +52,25 @@ namespace dawn_native { namespace d3d12 {
options.SetHLSLPointCoordCompat(true);
options.SetHLSLPointSizeCompat(true);
- shaderc_spvc_status status =
- mSpvcContext.InitializeForHlsl(descriptor->code, descriptor->codeSize, options);
- if (status != shaderc_spvc_status_success) {
- return DAWN_VALIDATION_ERROR("Unable to initialize instance of spvc");
- }
+ DAWN_TRY(CheckSpvcSuccess(
+ mSpvcContext.InitializeForHlsl(descriptor->code, descriptor->codeSize, options),
+ "Unable to initialize instance of spvc"));
- spirv_cross::Compiler* compiler =
- reinterpret_cast<spirv_cross::Compiler*>(mSpvcContext.GetCompiler());
- ExtractSpirvInfo(*compiler);
+ spirv_cross::Compiler* compiler;
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.GetCompiler(reinterpret_cast<void**>(&compiler)),
+ "Unable to get cross compiler"));
+ DAWN_TRY(ExtractSpirvInfo(*compiler));
} else {
spirv_cross::CompilerHLSL compiler(descriptor->code, descriptor->codeSize);
- ExtractSpirvInfo(compiler);
+ DAWN_TRY(ExtractSpirvInfo(compiler));
}
return {};
}
- const std::string ShaderModule::GetHLSLSource(PipelineLayout* layout) {
+ ResultOrError<std::string> ShaderModule::GetHLSLSource(PipelineLayout* layout) {
std::unique_ptr<spirv_cross::CompilerHLSL> compiler_impl;
spirv_cross::CompilerHLSL* compiler;
- if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
- compiler = reinterpret_cast<spirv_cross::CompilerHLSL*>(mSpvcContext.GetCompiler());
- // TODO(rharrison): Check status & have some sort of meaningful error path
- } else {
+ if (!GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
// If these options are changed, the values in DawnSPIRVCrossHLSLFastFuzzer.cpp need to
// be updated.
spirv_cross::CompilerGLSL::Options options_glsl;
@@ -97,22 +93,34 @@ namespace dawn_native { namespace d3d12 {
const ModuleBindingInfo& moduleBindingInfo = GetBindingInfo();
for (uint32_t group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- const auto& bindingOffsets =
- ToBackend(layout->GetBindGroupLayout(group))->GetBindingOffsets();
+ const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
+ const auto& bindingOffsets = bgl->GetBindingOffsets();
const auto& groupBindingInfo = moduleBindingInfo[group];
- for (uint32_t binding = 0; binding < groupBindingInfo.size(); ++binding) {
- const BindingInfo& bindingInfo = groupBindingInfo[binding];
- if (bindingInfo.used) {
- uint32_t bindingOffset = bindingOffsets[binding];
+ for (const auto& it : groupBindingInfo) {
+ const ShaderBindingInfo& bindingInfo = it.second;
+ BindingNumber bindingNumber = it.first;
+ BindingIndex bindingIndex = bgl->GetBindingIndex(bindingNumber);
+
+ uint32_t bindingOffset = bindingOffsets[bindingIndex];
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ DAWN_TRY(CheckSpvcSuccess(
+ mSpvcContext.SetDecoration(bindingInfo.id, SHADERC_SPVC_DECORATION_BINDING,
+ bindingOffset),
+ "Unable to set decorating binding before generating HLSL shader w/ "
+ "spvc"));
+ } else {
compiler->set_decoration(bindingInfo.id, spv::DecorationBinding, bindingOffset);
}
}
}
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
shaderc_spvc::CompilationResult result;
- mSpvcContext.CompileShader(&result);
- // TODO(rharrison): Check status & have some sort of meaningful error path
- return result.GetStringOutput();
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.CompileShader(&result),
+ "Unable to generate HLSL shader w/ spvc"));
+ std::string result_string;
+ DAWN_TRY(CheckSpvcSuccess(result.GetStringOutput(&result_string),
+ "Unable to get HLSL shader text"));
+ return result_string;
} else {
return compiler->compile();
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
index bcec904779a..0b4aeff19a3 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
@@ -27,7 +27,7 @@ namespace dawn_native { namespace d3d12 {
static ResultOrError<ShaderModule*> Create(Device* device,
const ShaderModuleDescriptor* descriptor);
- const std::string GetHLSLSource(PipelineLayout* layout);
+ ResultOrError<std::string> GetHLSLSource(PipelineLayout* layout);
private:
ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
new file mode 100644
index 00000000000..101ca4b190c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
@@ -0,0 +1,206 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h"
+#include "dawn_native/d3d12/D3D12Error.h"
+#include "dawn_native/d3d12/DeviceD3D12.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ // Check that d3d heap type enum correctly mirrors the type index used by the static arrays.
+ static_assert(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV == 0, "");
+ static_assert(D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER == 1, "");
+
+ // Thresholds should be adjusted (lower == faster) to avoid tests taking too long to complete.
+ static constexpr const uint32_t kShaderVisibleSmallHeapSizes[] = {1024, 512};
+
+ uint32_t GetD3D12ShaderVisibleHeapSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType, bool useSmallSize) {
+ if (useSmallSize) {
+ return kShaderVisibleSmallHeapSizes[heapType];
+ }
+
+ switch (heapType) {
+ case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+ return D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1;
+ case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+ return D3D12_MAX_SHADER_VISIBLE_SAMPLER_HEAP_SIZE;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ D3D12_DESCRIPTOR_HEAP_FLAGS GetD3D12HeapFlags(D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
+ switch (heapType) {
+ case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+ case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+ return D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ ShaderVisibleDescriptorAllocator::ShaderVisibleDescriptorAllocator(Device* device)
+ : mDevice(device),
+ mSizeIncrements{
+ device->GetD3D12Device()->GetDescriptorHandleIncrementSize(
+ D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV),
+ device->GetD3D12Device()->GetDescriptorHandleIncrementSize(
+ D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER),
+ } {
+ }
+
+ MaybeError ShaderVisibleDescriptorAllocator::Initialize() {
+ ASSERT(mShaderVisibleBuffers[D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV].heap.Get() == nullptr);
+ mShaderVisibleBuffers[D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV].heapType =
+ D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV;
+
+ ASSERT(mShaderVisibleBuffers[D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER].heap.Get() == nullptr);
+ mShaderVisibleBuffers[D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER].heapType =
+ D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER;
+
+ DAWN_TRY(AllocateAndSwitchShaderVisibleHeaps());
+
+ return {};
+ }
+
+ MaybeError ShaderVisibleDescriptorAllocator::AllocateAndSwitchShaderVisibleHeaps() {
+ DAWN_TRY(AllocateGPUHeap(&mShaderVisibleBuffers[D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV]));
+ DAWN_TRY(AllocateGPUHeap(&mShaderVisibleBuffers[D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER]));
+
+ // Invalidate all bindgroup allocations on previously bound heaps by incrementing the heap
+ // serial. When a bindgroup attempts to re-populate, it will compare with its recorded
+ // heap serial.
+ mShaderVisibleHeapsSerial++;
+
+ return {};
+ }
+
+ ResultOrError<DescriptorHeapAllocation>
+ ShaderVisibleDescriptorAllocator::AllocateGPUDescriptors(uint32_t descriptorCount,
+ Serial pendingSerial,
+ D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
+ ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
+ heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
+ ASSERT(mShaderVisibleBuffers[heapType].heap != nullptr);
+ const uint64_t startOffset =
+ mShaderVisibleBuffers[heapType].allocator.Allocate(descriptorCount, pendingSerial);
+ if (startOffset == RingBufferAllocator::kInvalidOffset) {
+ return DescriptorHeapAllocation{}; // Invalid
+ }
+
+ ID3D12DescriptorHeap* descriptorHeap = mShaderVisibleBuffers[heapType].heap.Get();
+
+ D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor =
+ descriptorHeap->GetCPUDescriptorHandleForHeapStart();
+ baseCPUDescriptor.ptr += mSizeIncrements[heapType] * startOffset;
+
+ D3D12_GPU_DESCRIPTOR_HANDLE baseGPUDescriptor =
+ descriptorHeap->GetGPUDescriptorHandleForHeapStart();
+ baseGPUDescriptor.ptr += mSizeIncrements[heapType] * startOffset;
+
+ return DescriptorHeapAllocation{mSizeIncrements[heapType], baseCPUDescriptor,
+ baseGPUDescriptor};
+ }
+
+ std::array<ID3D12DescriptorHeap*, 2> ShaderVisibleDescriptorAllocator::GetShaderVisibleHeaps()
+ const {
+ return {mShaderVisibleBuffers[D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV].heap.Get(),
+ mShaderVisibleBuffers[D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER].heap.Get()};
+ }
+
+ void ShaderVisibleDescriptorAllocator::Tick(uint64_t completedSerial) {
+ for (uint32_t i = 0; i < mShaderVisibleBuffers.size(); i++) {
+ ASSERT(mShaderVisibleBuffers[i].heap != nullptr);
+ mShaderVisibleBuffers[i].allocator.Deallocate(completedSerial);
+ }
+ }
+
+ // Creates a GPU descriptor heap that manages descriptors in a FIFO queue.
+ MaybeError ShaderVisibleDescriptorAllocator::AllocateGPUHeap(
+ ShaderVisibleBuffer* shaderVisibleBuffer) {
+ ComPtr<ID3D12DescriptorHeap> heap;
+ // Return the switched out heap to the pool and retrieve the oldest heap that is no longer
+ // used by GPU. This maintains a heap buffer to avoid frequently re-creating heaps for heavy
+ // users.
+ // TODO(dawn:256): Consider periodically triming to avoid OOM.
+ if (shaderVisibleBuffer->heap != nullptr) {
+ shaderVisibleBuffer->pool.push_back(
+ {mDevice->GetPendingCommandSerial(), std::move(shaderVisibleBuffer->heap)});
+ }
+
+ // Recycle existing heap if possible.
+ if (!shaderVisibleBuffer->pool.empty() &&
+ shaderVisibleBuffer->pool.front().heapSerial <= mDevice->GetCompletedCommandSerial()) {
+ heap = std::move(shaderVisibleBuffer->pool.front().heap);
+ shaderVisibleBuffer->pool.pop_front();
+ }
+
+ const D3D12_DESCRIPTOR_HEAP_TYPE heapType = shaderVisibleBuffer->heapType;
+
+ // TODO(bryan.bernhart@intel.com): Allocating to max heap size wastes memory
+ // should the developer not allocate any bindings for the heap type.
+ // Consider dynamically re-sizing GPU heaps.
+ const uint32_t descriptorCount = GetD3D12ShaderVisibleHeapSize(
+ heapType, mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+ if (heap == nullptr) {
+ D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
+ heapDescriptor.Type = heapType;
+ heapDescriptor.NumDescriptors = descriptorCount;
+ heapDescriptor.Flags = GetD3D12HeapFlags(heapType);
+ heapDescriptor.NodeMask = 0;
+ DAWN_TRY(CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreateDescriptorHeap(
+ &heapDescriptor, IID_PPV_ARGS(&heap)),
+ "ID3D12Device::CreateDescriptorHeap"));
+ }
+
+ // Create a FIFO buffer from the recently created heap.
+ shaderVisibleBuffer->heap = std::move(heap);
+ shaderVisibleBuffer->allocator = RingBufferAllocator(descriptorCount);
+ return {};
+ }
+
+ Serial ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapsSerial() const {
+ return mShaderVisibleHeapsSerial;
+ }
+
+ uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSizeForTesting(
+ D3D12_DESCRIPTOR_HEAP_TYPE heapType) const {
+ ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
+ heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
+ return mShaderVisibleBuffers[heapType].allocator.GetSize();
+ }
+
+ ComPtr<ID3D12DescriptorHeap> ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapForTesting(
+ D3D12_DESCRIPTOR_HEAP_TYPE heapType) const {
+ ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
+ heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
+ return mShaderVisibleBuffers[heapType].heap;
+ }
+
+ uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisiblePoolSizeForTesting(
+ D3D12_DESCRIPTOR_HEAP_TYPE heapType) const {
+ ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
+ heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
+ return mShaderVisibleBuffers[heapType].pool.size();
+ }
+
+ bool ShaderVisibleDescriptorAllocator::IsAllocationStillValid(Serial lastUsageSerial,
+ Serial heapSerial) const {
+ // Consider valid if allocated for the pending submit and the shader visible heaps
+ // have not switched over.
+ return (lastUsageSerial > mDevice->GetCompletedCommandSerial() &&
+ heapSerial == mShaderVisibleHeapsSerial);
+ }
+}} // namespace dawn_native::d3d12 \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
new file mode 100644
index 00000000000..66f63f55eb8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
@@ -0,0 +1,80 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_
+#define DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_
+
+#include "dawn_native/Error.h"
+#include "dawn_native/RingBufferAllocator.h"
+#include "dawn_native/d3d12/DescriptorHeapAllocationD3D12.h"
+
+#include <array>
+#include <list>
+
+namespace dawn_native { namespace d3d12 {
+
+ class Device;
+
+ // Manages descriptor heap allocators used by the device to create descriptors using allocation
+ // methods based on the heap type.
+ class ShaderVisibleDescriptorAllocator {
+ public:
+ ShaderVisibleDescriptorAllocator(Device* device);
+ MaybeError Initialize();
+
+ ResultOrError<DescriptorHeapAllocation> AllocateGPUDescriptors(
+ uint32_t descriptorCount,
+ Serial pendingSerial,
+ D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+
+ void Tick(uint64_t completedSerial);
+ Serial GetShaderVisibleHeapsSerial() const;
+
+ std::array<ID3D12DescriptorHeap*, 2> GetShaderVisibleHeaps() const;
+ MaybeError AllocateAndSwitchShaderVisibleHeaps();
+
+ uint64_t GetShaderVisibleHeapSizeForTesting(D3D12_DESCRIPTOR_HEAP_TYPE heapType) const;
+ ComPtr<ID3D12DescriptorHeap> GetShaderVisibleHeapForTesting(
+ D3D12_DESCRIPTOR_HEAP_TYPE heapType) const;
+ uint64_t GetShaderVisiblePoolSizeForTesting(D3D12_DESCRIPTOR_HEAP_TYPE heapType) const;
+
+ bool IsAllocationStillValid(Serial lastUsageSerial, Serial heapSerial) const;
+
+ private:
+ struct SerialDescriptorHeap {
+ Serial heapSerial;
+ ComPtr<ID3D12DescriptorHeap> heap;
+ };
+
+ struct ShaderVisibleBuffer {
+ ComPtr<ID3D12DescriptorHeap> heap;
+ RingBufferAllocator allocator;
+ std::list<SerialDescriptorHeap> pool;
+ D3D12_DESCRIPTOR_HEAP_TYPE heapType;
+ };
+
+ MaybeError AllocateGPUHeap(ShaderVisibleBuffer* shaderVisibleBuffer);
+
+ Device* mDevice;
+
+ // The serial value of 0 means the shader-visible heaps have not been allocated.
+ // This value is never returned by GetShaderVisibleHeapsSerial() after Initialize().
+ Serial mShaderVisibleHeapsSerial = 0;
+
+ std::array<ShaderVisibleBuffer, 2> mShaderVisibleBuffers;
+ std::array<uint32_t, 2> mSizeIncrements;
+ };
+}} // namespace dawn_native::d3d12
+
+#endif // DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_ \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
index c2b2cc1f3cd..2d6b5af1d0f 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
@@ -15,6 +15,8 @@
#include "dawn_native/d3d12/StagingBufferD3D12.h"
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
+#include "dawn_native/d3d12/HeapD3D12.h"
+#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -40,6 +42,11 @@ namespace dawn_native { namespace d3d12 {
mDevice->AllocateMemory(D3D12_HEAP_TYPE_UPLOAD, resourceDescriptor,
D3D12_RESOURCE_STATE_GENERIC_READ));
+ // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
+ // evicted. This buffer should already have been made resident when it was created.
+ DAWN_TRY(mDevice->GetResidencyManager()->LockMappableHeap(
+ ToBackend(mUploadHeap.GetResourceHeap())));
+
return CheckHRESULT(GetResource()->Map(0, nullptr, &mMappedPointer), "ID3D12Resource::Map");
}
@@ -49,6 +56,12 @@ namespace dawn_native { namespace d3d12 {
if (mUploadHeap.GetInfo().mMethod == AllocationMethod::kInvalid) {
return;
}
+
+ // The underlying heap was locked in residency upon creation. We must unlock it when this
+ // buffer becomes unmapped.
+ mDevice->GetResidencyManager()->UnlockMappableHeap(
+ ToBackend(mUploadHeap.GetResourceHeap()));
+
// Invalidate the CPU virtual address & flush cache (if needed).
GetResource()->Unmap(0, nullptr);
mMappedPointer = nullptr;
@@ -59,5 +72,4 @@ namespace dawn_native { namespace d3d12 {
ID3D12Resource* StagingBuffer::GetResource() const {
return mUploadHeap.GetD3D12Resource().Get();
}
-
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.h
index ebba0c67185..5bc32a26e22 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.h
@@ -21,6 +21,7 @@
namespace dawn_native { namespace d3d12 {
+ class CommandRecordingContext;
class Device;
class StagingBuffer : public StagingBufferBase {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
index aec8a61c288..45ec25bb979 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
@@ -22,7 +22,7 @@
namespace dawn_native { namespace d3d12 {
SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : SwapChainBase(device, descriptor) {
+ : OldSwapChainBase(device, descriptor) {
const auto& im = GetImplementation();
DawnWSIContextD3D12 wsiContext = {};
wsiContext.device = reinterpret_cast<WGPUDevice>(GetDevice());
@@ -40,7 +40,7 @@ namespace dawn_native { namespace d3d12 {
DawnSwapChainNextTexture next = {};
DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
if (error) {
- GetDevice()->HandleError(wgpu::ErrorType::Unknown, error);
+ GetDevice()->HandleError(InternalErrorType::Internal, error);
return nullptr;
}
@@ -55,7 +55,7 @@ namespace dawn_native { namespace d3d12 {
DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
// Perform the necessary transition for the texture to be presented.
- ToBackend(texture)->TransitionUsageNow(commandContext, mTextureUsage);
+ ToBackend(texture)->TrackUsageAndTransitionNow(commandContext, mTextureUsage);
DAWN_TRY(device->ExecutePendingCommandContext());
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
index 4b83ce4d09b..d601fdcc96c 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
@@ -21,7 +21,7 @@ namespace dawn_native { namespace d3d12 {
class Device;
- class SwapChain : public SwapChainBase {
+ class SwapChain : public OldSwapChainBase {
public:
SwapChain(Device* device, const SwapChainDescriptor* descriptor);
~SwapChain();
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
index f37dc76731c..c9242c15ea5 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
@@ -19,9 +19,11 @@
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/Error.h"
#include "dawn_native/d3d12/BufferD3D12.h"
+#include "dawn_native/d3d12/CommandRecordingContext.h"
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DescriptorHeapAllocator.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
+#include "dawn_native/d3d12/HeapD3D12.h"
#include "dawn_native/d3d12/ResourceAllocatorManagerD3D12.h"
#include "dawn_native/d3d12/StagingBufferD3D12.h"
#include "dawn_native/d3d12/TextureCopySplitter.h"
@@ -280,19 +282,28 @@ namespace dawn_native { namespace d3d12 {
}
ResultOrError<TextureBase*> Texture::Create(Device* device,
- const TextureDescriptor* descriptor,
+ const ExternalImageDescriptor* descriptor,
HANDLE sharedHandle,
- uint64_t acquireMutexKey) {
+ uint64_t acquireMutexKey,
+ bool isSwapChainTexture) {
+ const TextureDescriptor* textureDescriptor =
+ reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
+
Ref<Texture> dawnTexture =
- AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
- DAWN_TRY(
- dawnTexture->InitializeAsExternalTexture(descriptor, sharedHandle, acquireMutexKey));
+ AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedExternal));
+ DAWN_TRY(dawnTexture->InitializeAsExternalTexture(textureDescriptor, sharedHandle,
+ acquireMutexKey, isSwapChainTexture));
+
+ dawnTexture->SetIsSubresourceContentInitialized(descriptor->isCleared, 0,
+ textureDescriptor->mipLevelCount, 0,
+ textureDescriptor->arrayLayerCount);
return dawnTexture.Detach();
}
MaybeError Texture::InitializeAsExternalTexture(const TextureDescriptor* descriptor,
HANDLE sharedHandle,
- uint64_t acquireMutexKey) {
+ uint64_t acquireMutexKey,
+ bool isSwapChainTexture) {
Device* dawnDevice = ToBackend(GetDevice());
DAWN_TRY(ValidateTextureDescriptor(dawnDevice, descriptor));
DAWN_TRY(ValidateTextureDescriptorCanBeWrapped(descriptor));
@@ -313,13 +324,14 @@ namespace dawn_native { namespace d3d12 {
mAcquireMutexKey = acquireMutexKey;
mDxgiKeyedMutex = std::move(dxgiKeyedMutex);
+ mSwapChainTexture = isSwapChainTexture;
AllocationInfo info;
- info.mMethod = AllocationMethod::kDirect;
- mResourceAllocation = {info, 0, std::move(d3d12Resource)};
-
- SetIsSubresourceContentInitialized(true, 0, descriptor->mipLevelCount, 0,
- descriptor->arrayLayerCount);
+ info.mMethod = AllocationMethod::kExternal;
+ // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
+ // texture is owned externally. The texture's owning entity must remain responsible for
+ // memory management.
+ mResourceAllocation = {info, 0, std::move(d3d12Resource), nullptr};
return {};
}
@@ -366,8 +378,11 @@ namespace dawn_native { namespace d3d12 {
ComPtr<ID3D12Resource> nativeTexture)
: TextureBase(device, descriptor, TextureState::OwnedExternal) {
AllocationInfo info;
- info.mMethod = AllocationMethod::kDirect;
- mResourceAllocation = {info, 0, std::move(nativeTexture)};
+ info.mMethod = AllocationMethod::kExternal;
+ // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
+ // texture is owned externally. The texture's owning entity must remain responsible for
+ // memory management.
+ mResourceAllocation = {info, 0, std::move(nativeTexture), nullptr};
SetIsSubresourceContentInitialized(true, 0, descriptor->mipLevelCount, 0,
descriptor->arrayLayerCount);
@@ -379,6 +394,20 @@ namespace dawn_native { namespace d3d12 {
void Texture::DestroyImpl() {
Device* device = ToBackend(GetDevice());
+
+ // In PIX's D3D12-only mode, there is no way to determine frame boundaries
+ // for WebGPU since Dawn does not manage DXGI swap chains. Without assistance,
+ // PIX will wait forever for a present that never happens.
+ // If we know we're dealing with a swapbuffer texture, inform PIX we've
+ // "presented" the texture so it can determine frame boundaries and use its
+ // contents for the UI.
+ if (mSwapChainTexture) {
+ ID3D12SharingContract* d3dSharingContract = device->GetSharingContract();
+ if (d3dSharingContract != nullptr) {
+ d3dSharingContract->Present(mResourceAllocation.GetD3D12Resource().Get(), 0, 0);
+ }
+ }
+
device->DeallocateMemory(mResourceAllocation);
if (mDxgiKeyedMutex != nullptr) {
@@ -407,11 +436,45 @@ namespace dawn_native { namespace d3d12 {
// When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
// ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
// cause subsequent errors.
- bool Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- wgpu::TextureUsage newUsage) {
- return TransitionUsageAndGetResourceBarrier(commandContext, barrier,
- D3D12TextureUsage(newUsage, GetFormat()));
+ bool Texture::TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ wgpu::TextureUsage newUsage) {
+ return TrackUsageAndGetResourceBarrier(commandContext, barrier,
+ D3D12TextureUsage(newUsage, GetFormat()));
+ }
+
+ // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
+ // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
+ // cause subsequent errors.
+ bool Texture::TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ D3D12_RESOURCE_STATES newState) {
+ if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
+ // Track the underlying heap to ensure residency.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
+ }
+
+ // Return the resource barrier.
+ return TransitionUsageAndGetResourceBarrier(commandContext, barrier, newState);
+ }
+
+ void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::TextureUsage usage) {
+ D3D12_RESOURCE_BARRIER barrier;
+
+ if (TrackUsageAndGetResourceBarrier(commandContext, &barrier, usage)) {
+ commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
+ }
+ }
+
+ void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_STATES newState) {
+ D3D12_RESOURCE_BARRIER barrier;
+
+ if (TrackUsageAndGetResourceBarrier(commandContext, &barrier, newState)) {
+ commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
+ }
}
// When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
@@ -492,21 +555,7 @@ namespace dawn_native { namespace d3d12 {
return true;
}
- void Texture::TransitionUsageNow(CommandRecordingContext* commandContext,
- wgpu::TextureUsage usage) {
- TransitionUsageNow(commandContext, D3D12TextureUsage(usage, GetFormat()));
- }
-
- void Texture::TransitionUsageNow(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_STATES newState) {
- D3D12_RESOURCE_BARRIER barrier;
-
- if (TransitionUsageAndGetResourceBarrier(commandContext, &barrier, newState)) {
- commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
- }
- }
-
- D3D12_RENDER_TARGET_VIEW_DESC Texture::GetRTVDescriptor(uint32_t baseMipLevel,
+ D3D12_RENDER_TARGET_VIEW_DESC Texture::GetRTVDescriptor(uint32_t mipLevel,
uint32_t baseArrayLayer,
uint32_t layerCount) const {
ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
@@ -516,7 +565,7 @@ namespace dawn_native { namespace d3d12 {
ASSERT(GetNumMipLevels() == 1);
ASSERT(layerCount == 1);
ASSERT(baseArrayLayer == 0);
- ASSERT(baseMipLevel == 0);
+ ASSERT(mipLevel == 0);
rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DMS;
} else {
// Currently we always use D3D12_TEX2D_ARRAY_RTV because we cannot specify base array
@@ -528,23 +577,30 @@ namespace dawn_native { namespace d3d12 {
rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
rtvDesc.Texture2DArray.FirstArraySlice = baseArrayLayer;
rtvDesc.Texture2DArray.ArraySize = layerCount;
- rtvDesc.Texture2DArray.MipSlice = baseMipLevel;
+ rtvDesc.Texture2DArray.MipSlice = mipLevel;
rtvDesc.Texture2DArray.PlaneSlice = 0;
}
return rtvDesc;
}
- D3D12_DEPTH_STENCIL_VIEW_DESC Texture::GetDSVDescriptor(uint32_t baseMipLevel) const {
+ D3D12_DEPTH_STENCIL_VIEW_DESC Texture::GetDSVDescriptor(uint32_t mipLevel,
+ uint32_t baseArrayLayer,
+ uint32_t layerCount) const {
D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc;
dsvDesc.Format = GetD3D12Format();
dsvDesc.Flags = D3D12_DSV_FLAG_NONE;
- ASSERT(baseMipLevel == 0);
if (IsMultisampledTexture()) {
+ ASSERT(GetNumMipLevels() == 1);
+ ASSERT(layerCount == 1);
+ ASSERT(baseArrayLayer == 0);
+ ASSERT(mipLevel == 0);
dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DMS;
} else {
- dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2D;
- dsvDesc.Texture2D.MipSlice = baseMipLevel;
+ dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DARRAY;
+ dsvDesc.Texture2DArray.FirstArraySlice = baseArrayLayer;
+ dsvDesc.Texture2DArray.ArraySize = layerCount;
+ dsvDesc.Texture2DArray.MipSlice = mipLevel;
}
return dsvDesc;
@@ -567,45 +623,69 @@ namespace dawn_native { namespace d3d12 {
Device* device = ToBackend(GetDevice());
DescriptorHeapAllocator* descriptorHeapAllocator = device->GetDescriptorHeapAllocator();
+
uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+ float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
+
if (GetFormat().isRenderable) {
if (GetFormat().HasDepthOrStencil()) {
- TransitionUsageNow(commandContext, D3D12_RESOURCE_STATE_DEPTH_WRITE);
- DescriptorHeapHandle dsvHeap;
- DAWN_TRY_ASSIGN(dsvHeap, descriptorHeapAllocator->AllocateCPUHeap(
- D3D12_DESCRIPTOR_HEAP_TYPE_DSV, 1));
- D3D12_CPU_DESCRIPTOR_HANDLE dsvHandle = dsvHeap.GetCPUHandle(0);
- D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc = GetDSVDescriptor(baseMipLevel);
- device->GetD3D12Device()->CreateDepthStencilView(GetD3D12Resource(), &dsvDesc,
- dsvHandle);
+ TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_DEPTH_WRITE);
D3D12_CLEAR_FLAGS clearFlags = {};
- if (GetFormat().HasDepth()) {
- clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
- }
- if (GetFormat().HasStencil()) {
- clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
- }
- commandList->ClearDepthStencilView(dsvHandle, clearFlags, clearColor, clearColor, 0,
- nullptr);
+ for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ for (uint32_t layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
+ ++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+
+ DescriptorHeapHandle dsvHeap;
+ DAWN_TRY_ASSIGN(dsvHeap, descriptorHeapAllocator->AllocateCPUHeap(
+ D3D12_DESCRIPTOR_HEAP_TYPE_DSV, 1));
+ D3D12_CPU_DESCRIPTOR_HANDLE dsvHandle = dsvHeap.GetCPUHandle(0);
+ D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc = GetDSVDescriptor(level, layer, 1);
+ device->GetD3D12Device()->CreateDepthStencilView(GetD3D12Resource(),
+ &dsvDesc, dsvHandle);
+
+ if (GetFormat().HasDepth()) {
+ clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
+ }
+ if (GetFormat().HasStencil()) {
+ clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
+ }
+
+ commandList->ClearDepthStencilView(dsvHandle, clearFlags, fClearColor,
+ clearColor, 0, nullptr);
+ }
+ }
} else {
- TransitionUsageNow(commandContext, D3D12_RESOURCE_STATE_RENDER_TARGET);
- DescriptorHeapHandle rtvHeap;
- DAWN_TRY_ASSIGN(rtvHeap, descriptorHeapAllocator->AllocateCPUHeap(
- D3D12_DESCRIPTOR_HEAP_TYPE_RTV, 1));
- D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = rtvHeap.GetCPUHandle(0);
- const float fClearColor = static_cast<float>(clearColor);
+ TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_RENDER_TARGET);
+
const float clearColorRGBA[4] = {fClearColor, fClearColor, fClearColor,
fClearColor};
- // TODO(natlee@microsoft.com): clear all array layers for 2D array textures
- for (uint32_t i = baseMipLevel; i < baseMipLevel + levelCount; i++) {
- D3D12_RENDER_TARGET_VIEW_DESC rtvDesc =
- GetRTVDescriptor(i, baseArrayLayer, layerCount);
- device->GetD3D12Device()->CreateRenderTargetView(GetD3D12Resource(), &rtvDesc,
- rtvHandle);
- commandList->ClearRenderTargetView(rtvHandle, clearColorRGBA, 0, nullptr);
+ for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ for (uint32_t layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
+ ++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+
+ DescriptorHeapHandle rtvHeap;
+ DAWN_TRY_ASSIGN(rtvHeap, descriptorHeapAllocator->AllocateCPUHeap(
+ D3D12_DESCRIPTOR_HEAP_TYPE_RTV, 1));
+ D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = rtvHeap.GetCPUHandle(0);
+
+ D3D12_RENDER_TARGET_VIEW_DESC rtvDesc = GetRTVDescriptor(level, layer, 1);
+ device->GetD3D12Device()->CreateRenderTargetView(GetD3D12Resource(),
+ &rtvDesc, rtvHandle);
+ commandList->ClearRenderTargetView(rtvHandle, clearColorRGBA, 0, nullptr);
+ }
}
}
} else {
@@ -623,20 +703,24 @@ namespace dawn_native { namespace d3d12 {
UploadHandle uploadHandle;
DAWN_TRY_ASSIGN(uploadHandle,
uploader->Allocate(bufferSize, device->GetPendingCommandSerial()));
- std::fill(reinterpret_cast<uint32_t*>(uploadHandle.mappedBuffer),
- reinterpret_cast<uint32_t*>(uploadHandle.mappedBuffer + bufferSize),
- clearColor);
+ memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
- TransitionUsageNow(commandContext, D3D12_RESOURCE_STATE_COPY_DEST);
-
- // compute d3d12 texture copy locations for texture and buffer
- Extent3D copySize = {GetSize().width, GetSize().height, 1};
- TextureCopySplit copySplit = ComputeTextureCopySplit(
- {0, 0, 0}, copySize, GetFormat(), uploadHandle.startOffset, rowPitch, 0);
+ TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_COPY_DEST);
for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ // compute d3d12 texture copy locations for texture and buffer
+ Extent3D copySize = GetMipLevelVirtualSize(level);
+ TextureCopySplit copySplit = ComputeTextureCopySplit(
+ {0, 0, 0}, copySize, GetFormat(), uploadHandle.startOffset, rowPitch, 0);
+
for (uint32_t layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+
D3D12_TEXTURE_COPY_LOCATION textureLocation =
ComputeTextureCopyLocationForTexture(this, level, layer);
for (uint32_t i = 0; i < copySplit.count; ++i) {
@@ -738,11 +822,10 @@ namespace dawn_native { namespace d3d12 {
D3D12_DEPTH_STENCIL_VIEW_DESC TextureView::GetDSVDescriptor() const {
// TODO(jiawei.shao@intel.com): support rendering into a layer of a texture.
- ASSERT(GetLayerCount() == 1);
ASSERT(GetLevelCount() == 1);
- ASSERT(GetBaseMipLevel() == 0);
- ASSERT(GetBaseArrayLayer() == 0);
- return ToBackend(GetTexture())->GetDSVDescriptor(GetBaseMipLevel());
+ uint32_t mipLevel = GetBaseMipLevel();
+ return ToBackend(GetTexture())
+ ->GetDSVDescriptor(mipLevel, GetBaseArrayLayer(), GetLayerCount());
}
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
index 332ab5a610b..6c63b5602a6 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
@@ -18,6 +18,7 @@
#include "common/Serial.h"
#include "dawn_native/Texture.h"
+#include "dawn_native/DawnNative.h"
#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
#include "dawn_native/d3d12/d3d12_platform.h"
@@ -36,9 +37,10 @@ namespace dawn_native { namespace d3d12 {
static ResultOrError<TextureBase*> Create(Device* device,
const TextureDescriptor* descriptor);
static ResultOrError<TextureBase*> Create(Device* device,
- const TextureDescriptor* descriptor,
+ const ExternalImageDescriptor* descriptor,
HANDLE sharedHandle,
- uint64_t acquireMutexKey);
+ uint64_t acquireMutexKey,
+ bool isSwapChainTexture);
Texture(Device* device,
const TextureDescriptor* descriptor,
ComPtr<ID3D12Resource> d3d12Texture);
@@ -47,30 +49,35 @@ namespace dawn_native { namespace d3d12 {
DXGI_FORMAT GetD3D12Format() const;
ID3D12Resource* GetD3D12Resource() const;
- bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_BARRIER* barrier,
- wgpu::TextureUsage newUsage);
- void TransitionUsageNow(CommandRecordingContext* commandContext, wgpu::TextureUsage usage);
- void TransitionUsageNow(CommandRecordingContext* commandContext,
- D3D12_RESOURCE_STATES newState);
- D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor(uint32_t baseMipLevel,
+ D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor(uint32_t mipLevel,
+ uint32_t baseArrayLayer,
+ uint32_t layerCount) const;
+ D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(uint32_t mipLevel,
uint32_t baseArrayLayer,
uint32_t layerCount) const;
- D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(uint32_t baseMipLevel) const;
void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
uint32_t baseMipLevel,
uint32_t levelCount,
uint32_t baseArrayLayer,
uint32_t layerCount);
+ bool TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ wgpu::TextureUsage newUsage);
+ void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ wgpu::TextureUsage usage);
+ void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_STATES newState);
+
private:
using TextureBase::TextureBase;
MaybeError InitializeAsInternalTexture();
MaybeError InitializeAsExternalTexture(const TextureDescriptor* descriptor,
HANDLE sharedHandle,
- uint64_t acquireMutexKey);
+ uint64_t acquireMutexKey,
+ bool isSwapChainTexture);
// Dawn API
void DestroyImpl() override;
@@ -83,6 +90,9 @@ namespace dawn_native { namespace d3d12 {
UINT16 GetDepthOrArraySize();
+ bool TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+ D3D12_RESOURCE_BARRIER* barrier,
+ D3D12_RESOURCE_STATES newState);
bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
D3D12_RESOURCE_BARRIER* barrier,
D3D12_RESOURCE_STATES newState);
@@ -92,6 +102,7 @@ namespace dawn_native { namespace d3d12 {
Serial mLastUsedSerial = UINT64_MAX;
bool mValidToDecay = false;
+ bool mSwapChainTexture = false;
Serial mAcquireMutexKey = 0;
ComPtr<IDXGIKeyedMutex> mDxgiKeyedMutex;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
index 646a387bace..2ec84109f00 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BackendMTL.mm
@@ -14,7 +14,7 @@
#include "dawn_native/metal/BackendMTL.h"
-#include "common/Constants.h"
+#include "common/GPUInfo.h"
#include "common/Platform.h"
#include "dawn_native/Instance.h"
#include "dawn_native/MetalBackend.h"
@@ -39,11 +39,11 @@ namespace dawn_native { namespace metal {
};
#if defined(DAWN_PLATFORM_MACOS)
- const Vendor kVendors[] = {{"AMD", kVendorID_AMD},
- {"Radeon", kVendorID_AMD},
- {"Intel", kVendorID_Intel},
- {"Geforce", kVendorID_Nvidia},
- {"Quadro", kVendorID_Nvidia}};
+ const Vendor kVendors[] = {{"AMD", gpu_info::kVendorID_AMD},
+ {"Radeon", gpu_info::kVendorID_AMD},
+ {"Intel", gpu_info::kVendorID_Intel},
+ {"Geforce", gpu_info::kVendorID_Nvidia},
+ {"Quadro", gpu_info::kVendorID_Nvidia}};
// Find vendor ID from MTLDevice name.
MaybeError GetVendorIdFromVendors(id<MTLDevice> device, PCIIDs* ids) {
@@ -57,7 +57,7 @@ namespace dawn_native { namespace metal {
}
if (vendorId == 0) {
- return DAWN_DEVICE_LOST_ERROR("Failed to find vendor id with the device");
+ return DAWN_INTERNAL_ERROR("Failed to find vendor id with the device");
}
// Set vendor id with 0
@@ -103,11 +103,12 @@ namespace dawn_native { namespace metal {
//
// [device registryID] is the ID for one of the IOGraphicsAccelerator2 and we can see that
// their parent always is an IOPCIDevice that has properties for the device and vendor IDs.
- MaybeError GetDeviceIORegistryPCIInfo(id<MTLDevice> device, PCIIDs* ids) {
+ MaybeError API_AVAILABLE(macos(10.13))
+ GetDeviceIORegistryPCIInfo(id<MTLDevice> device, PCIIDs* ids) {
// Get a matching dictionary for the IOGraphicsAccelerator2
CFMutableDictionaryRef matchingDict = IORegistryEntryIDMatching([device registryID]);
if (matchingDict == nullptr) {
- return DAWN_DEVICE_LOST_ERROR("Failed to create the matching dict for the device");
+ return DAWN_INTERNAL_ERROR("Failed to create the matching dict for the device");
}
// IOServiceGetMatchingService will consume the reference on the matching dictionary,
@@ -115,7 +116,7 @@ namespace dawn_native { namespace metal {
io_registry_entry_t acceleratorEntry =
IOServiceGetMatchingService(kIOMasterPortDefault, matchingDict);
if (acceleratorEntry == IO_OBJECT_NULL) {
- return DAWN_DEVICE_LOST_ERROR(
+ return DAWN_INTERNAL_ERROR(
"Failed to get the IO registry entry for the accelerator");
}
@@ -124,7 +125,7 @@ namespace dawn_native { namespace metal {
if (IORegistryEntryGetParentEntry(acceleratorEntry, kIOServicePlane, &deviceEntry) !=
kIOReturnSuccess) {
IOObjectRelease(acceleratorEntry);
- return DAWN_DEVICE_LOST_ERROR("Failed to get the IO registry entry for the device");
+ return DAWN_INTERNAL_ERROR("Failed to get the IO registry entry for the device");
}
ASSERT(deviceEntry != IO_OBJECT_NULL);
@@ -143,7 +144,7 @@ namespace dawn_native { namespace metal {
MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
// [device registryID] is introduced on macOS 10.13+, otherwise workaround to get vendor
// id by vendor name on old macOS
- if ([NSProcessInfo.processInfo isOperatingSystemAtLeastVersion:{10, 13, 0}]) {
+ if (@available(macos 10.13, *)) {
return GetDeviceIORegistryPCIInfo(device, ids);
} else {
return GetVendorIdFromVendors(device, ids);
@@ -175,7 +176,7 @@ namespace dawn_native { namespace metal {
class Adapter : public AdapterBase {
public:
Adapter(InstanceBase* instance, id<MTLDevice> device)
- : AdapterBase(instance, BackendType::Metal), mDevice([device retain]) {
+ : AdapterBase(instance, wgpu::BackendType::Metal), mDevice([device retain]) {
mPCIInfo.name = std::string([mDevice.name UTF8String]);
PCIIDs ids;
@@ -185,12 +186,12 @@ namespace dawn_native { namespace metal {
};
#if defined(DAWN_PLATFORM_IOS)
- mDeviceType = DeviceType::IntegratedGPU;
+ mAdapterType = wgpu::AdapterType::IntegratedGPU;
#elif defined(DAWN_PLATFORM_MACOS)
if ([device isLowPower]) {
- mDeviceType = DeviceType::IntegratedGPU;
+ mAdapterType = wgpu::AdapterType::IntegratedGPU;
} else {
- mDeviceType = DeviceType::DiscreteGPU;
+ mAdapterType = wgpu::AdapterType::DiscreteGPU;
}
#else
# error "Unsupported Apple platform."
@@ -220,7 +221,8 @@ namespace dawn_native { namespace metal {
// Implementation of the Metal backend's BackendConnection
- Backend::Backend(InstanceBase* instance) : BackendConnection(instance, BackendType::Metal) {
+ Backend::Backend(InstanceBase* instance)
+ : BackendConnection(instance, wgpu::BackendType::Metal) {
if (GetInstance()->IsBackendValidationEnabled()) {
setenv("METAL_DEVICE_WRAPPER_TYPE", "1", 1);
}
@@ -228,9 +230,10 @@ namespace dawn_native { namespace metal {
std::vector<std::unique_ptr<AdapterBase>> Backend::DiscoverDefaultAdapters() {
std::vector<std::unique_ptr<AdapterBase>> adapters;
-
- if (@available(macOS 10.11, *)) {
+ BOOL supportedVersion = NO;
#if defined(DAWN_PLATFORM_MACOS)
+ if (@available(macOS 10.11, *)) {
+ supportedVersion = YES;
NSArray<id<MTLDevice>>* devices = MTLCopyAllDevices();
for (id<MTLDevice> device in devices) {
@@ -238,14 +241,18 @@ namespace dawn_native { namespace metal {
}
[devices release];
+ }
#endif
- } else if (@available(iOS 8.0, *)) {
+
#if defined(DAWN_PLATFORM_IOS)
+ if (@available(iOS 8.0, *)) {
+ supportedVersion = YES;
// iOS only has a single device so MTLCopyAllDevices doesn't exist there.
adapters.push_back(
std::make_unique<Adapter>(GetInstance(), MTLCreateSystemDefaultDevice()));
+ }
#endif
- } else {
+ if (!supportedVersion) {
UNREACHABLE();
}
return adapters;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h
new file mode 100644
index 00000000000..79118355be4
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.h
@@ -0,0 +1,39 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_BINDGROUPLAYOUTMTL_H_
+#define DAWNNATIVE_METAL_BINDGROUPLAYOUTMTL_H_
+
+#include "common/SlabAllocator.h"
+#include "dawn_native/BindGroupLayout.h"
+
+namespace dawn_native { namespace metal {
+
+ class BindGroup;
+ class Device;
+
+ class BindGroupLayout : public BindGroupLayoutBase {
+ public:
+ BindGroupLayout(DeviceBase* device, const BindGroupLayoutDescriptor* descriptor);
+
+ BindGroup* AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ void DeallocateBindGroup(BindGroup* bindGroup);
+
+ private:
+ SlabAllocator<BindGroup> mBindGroupAllocator;
+ };
+
+}} // namespace dawn_native::metal
+
+#endif // DAWNNATIVE_METAL_BINDGROUPLAYOUTMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm
new file mode 100644
index 00000000000..70beb5d5374
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupLayoutMTL.mm
@@ -0,0 +1,36 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/metal/BindGroupLayoutMTL.h"
+
+#include "dawn_native/metal/BindGroupMTL.h"
+
+namespace dawn_native { namespace metal {
+
+ BindGroupLayout::BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor)
+ : BindGroupLayoutBase(device, descriptor),
+ mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
+ }
+
+ BindGroup* BindGroupLayout::AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor) {
+ return mBindGroupAllocator.Allocate(device, descriptor);
+ }
+
+ void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
+ mBindGroupAllocator.Deallocate(bindGroup);
+ }
+
+}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.h
new file mode 100644
index 00000000000..4a0a22932c3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.h
@@ -0,0 +1,36 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_METAL_BINDGROUPMTL_H_
+#define DAWNNATIVE_METAL_BINDGROUPMTL_H_
+
+#include "common/PlacementAllocated.h"
+#include "dawn_native/BindGroup.h"
+
+namespace dawn_native { namespace metal {
+
+ class BindGroupLayout;
+ class Device;
+
+ class BindGroup : public BindGroupBase, public PlacementAllocated {
+ public:
+ BindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ ~BindGroup() override;
+
+ static BindGroup* Create(Device* device, const BindGroupDescriptor* descriptor);
+ };
+
+}} // namespace dawn_native::metal
+
+#endif // DAWNNATIVE_METAL_BINDGROUPMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.mm
new file mode 100644
index 00000000000..d8bcd515d97
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BindGroupMTL.mm
@@ -0,0 +1,34 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/metal/BindGroupMTL.h"
+
+#include "dawn_native/metal/BindGroupLayoutMTL.h"
+#include "dawn_native/metal/DeviceMTL.h"
+namespace dawn_native { namespace metal {
+
+ BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
+ : BindGroupBase(this, device, descriptor) {
+ }
+
+ BindGroup::~BindGroup() {
+ ToBackend(GetLayout())->DeallocateBindGroup(this);
+ }
+
+ // static
+ BindGroup* BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
+ return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+ }
+
+}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
index 640d19666b6..67a1313eba6 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
@@ -26,25 +26,24 @@ namespace dawn_native {
namespace dawn_native { namespace metal {
+ class CommandRecordingContext;
class Device;
- struct GlobalEncoders;
class CommandBuffer : public CommandBufferBase {
public:
CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
~CommandBuffer();
- void FillCommands(id<MTLCommandBuffer> commandBuffer);
+ void FillCommands(CommandRecordingContext* commandContext);
private:
- void EncodeComputePass(id<MTLCommandBuffer> commandBuffer);
- void EncodeRenderPass(id<MTLCommandBuffer> commandBuffer,
+ void EncodeComputePass(CommandRecordingContext* commandContext);
+ void EncodeRenderPass(CommandRecordingContext* commandContext,
MTLRenderPassDescriptor* mtlRenderPass,
- GlobalEncoders* globalEncoders,
uint32_t width,
uint32_t height);
- void EncodeRenderPassInternal(id<MTLCommandBuffer> commandBuffer,
+ void EncodeRenderPassInternal(CommandRecordingContext* commandContext,
MTLRenderPassDescriptor* mtlRenderPass,
uint32_t width,
uint32_t height);
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
index 38294dd63c7..67ec57cc7e8 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
@@ -14,11 +14,11 @@
#include "dawn_native/metal/CommandBufferMTL.h"
-#include "dawn_native/BindGroup.h"
#include "dawn_native/BindGroupTracker.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/Commands.h"
#include "dawn_native/RenderBundle.h"
+#include "dawn_native/metal/BindGroupMTL.h"
#include "dawn_native/metal/BufferMTL.h"
#include "dawn_native/metal/ComputePipelineMTL.h"
#include "dawn_native/metal/DeviceMTL.h"
@@ -29,25 +29,17 @@
namespace dawn_native { namespace metal {
- struct GlobalEncoders {
- id<MTLBlitCommandEncoder> blit = nil;
-
- void Finish() {
- if (blit != nil) {
- [blit endEncoding];
- blit = nil; // This will be autoreleased.
- }
- }
-
- void EnsureBlit(id<MTLCommandBuffer> commandBuffer) {
- if (blit == nil) {
- blit = [commandBuffer blitCommandEncoder];
- }
- }
- };
-
namespace {
+ // Allows this file to use MTLStoreActionStoreAndMultismapleResolve because the logic is
+ // first to compute what the "best" Metal render pass descriptor is, then fix it up if we
+ // are not on macOS 10.12 (i.e. the EmulateStoreAndMSAAResolve toggle is on).
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability"
+ constexpr MTLStoreAction kMTLStoreActionStoreAndMultisampleResolve =
+ MTLStoreActionStoreAndMultisampleResolve;
+#pragma clang diagnostic pop
+
// Creates an autoreleased MTLRenderPassDescriptor matching desc
MTLRenderPassDescriptor* CreateMTLRenderPassDescriptor(BeginRenderPassCmd* renderPass) {
MTLRenderPassDescriptor* descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
@@ -56,13 +48,21 @@ namespace dawn_native { namespace metal {
IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
auto& attachmentInfo = renderPass->colorAttachments[i];
- if (attachmentInfo.loadOp == wgpu::LoadOp::Clear) {
- descriptor.colorAttachments[i].loadAction = MTLLoadActionClear;
- descriptor.colorAttachments[i].clearColor =
- MTLClearColorMake(attachmentInfo.clearColor.r, attachmentInfo.clearColor.g,
- attachmentInfo.clearColor.b, attachmentInfo.clearColor.a);
- } else {
- descriptor.colorAttachments[i].loadAction = MTLLoadActionLoad;
+ switch (attachmentInfo.loadOp) {
+ case wgpu::LoadOp::Clear:
+ descriptor.colorAttachments[i].loadAction = MTLLoadActionClear;
+ descriptor.colorAttachments[i].clearColor = MTLClearColorMake(
+ attachmentInfo.clearColor.r, attachmentInfo.clearColor.g,
+ attachmentInfo.clearColor.b, attachmentInfo.clearColor.a);
+ break;
+
+ case wgpu::LoadOp::Load:
+ descriptor.colorAttachments[i].loadAction = MTLLoadActionLoad;
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
}
descriptor.colorAttachments[i].texture =
@@ -70,19 +70,32 @@ namespace dawn_native { namespace metal {
descriptor.colorAttachments[i].level = attachmentInfo.view->GetBaseMipLevel();
descriptor.colorAttachments[i].slice = attachmentInfo.view->GetBaseArrayLayer();
- if (attachmentInfo.storeOp == wgpu::StoreOp::Store) {
- if (attachmentInfo.resolveTarget.Get() != nullptr) {
- descriptor.colorAttachments[i].resolveTexture =
- ToBackend(attachmentInfo.resolveTarget->GetTexture())->GetMTLTexture();
- descriptor.colorAttachments[i].resolveLevel =
- attachmentInfo.resolveTarget->GetBaseMipLevel();
- descriptor.colorAttachments[i].resolveSlice =
- attachmentInfo.resolveTarget->GetBaseArrayLayer();
- descriptor.colorAttachments[i].storeAction =
- MTLStoreActionStoreAndMultisampleResolve;
- } else {
- descriptor.colorAttachments[i].storeAction = MTLStoreActionStore;
- }
+ bool hasResolveTarget = attachmentInfo.resolveTarget.Get() != nullptr;
+
+ switch (attachmentInfo.storeOp) {
+ case wgpu::StoreOp::Store:
+ if (hasResolveTarget) {
+ descriptor.colorAttachments[i].resolveTexture =
+ ToBackend(attachmentInfo.resolveTarget->GetTexture())
+ ->GetMTLTexture();
+ descriptor.colorAttachments[i].resolveLevel =
+ attachmentInfo.resolveTarget->GetBaseMipLevel();
+ descriptor.colorAttachments[i].resolveSlice =
+ attachmentInfo.resolveTarget->GetBaseArrayLayer();
+ descriptor.colorAttachments[i].storeAction =
+ kMTLStoreActionStoreAndMultisampleResolve;
+ } else {
+ descriptor.colorAttachments[i].storeAction = MTLStoreActionStore;
+ }
+ break;
+
+ case wgpu::StoreOp::Clear:
+ descriptor.colorAttachments[i].storeAction = MTLStoreActionDontCare;
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
}
}
@@ -96,25 +109,67 @@ namespace dawn_native { namespace metal {
if (format.HasDepth()) {
descriptor.depthAttachment.texture = texture;
- descriptor.depthAttachment.storeAction = MTLStoreActionStore;
- if (attachmentInfo.depthLoadOp == wgpu::LoadOp::Clear) {
- descriptor.depthAttachment.loadAction = MTLLoadActionClear;
- descriptor.depthAttachment.clearDepth = attachmentInfo.clearDepth;
- } else {
- descriptor.depthAttachment.loadAction = MTLLoadActionLoad;
+ switch (attachmentInfo.depthStoreOp) {
+ case wgpu::StoreOp::Store:
+ descriptor.depthAttachment.storeAction = MTLStoreActionStore;
+ break;
+
+ case wgpu::StoreOp::Clear:
+ descriptor.depthAttachment.storeAction = MTLStoreActionDontCare;
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ switch (attachmentInfo.depthLoadOp) {
+ case wgpu::LoadOp::Clear:
+ descriptor.depthAttachment.loadAction = MTLLoadActionClear;
+ descriptor.depthAttachment.clearDepth = attachmentInfo.clearDepth;
+ break;
+
+ case wgpu::LoadOp::Load:
+ descriptor.depthAttachment.loadAction = MTLLoadActionLoad;
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
}
}
if (format.HasStencil()) {
descriptor.stencilAttachment.texture = texture;
- descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
- if (attachmentInfo.stencilLoadOp == wgpu::LoadOp::Clear) {
- descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
- descriptor.stencilAttachment.clearStencil = attachmentInfo.clearStencil;
- } else {
- descriptor.stencilAttachment.loadAction = MTLLoadActionLoad;
+ switch (attachmentInfo.stencilStoreOp) {
+ case wgpu::StoreOp::Store:
+ descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
+ break;
+
+ case wgpu::StoreOp::Clear:
+ descriptor.stencilAttachment.storeAction = MTLStoreActionDontCare;
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ switch (attachmentInfo.stencilLoadOp) {
+ case wgpu::LoadOp::Clear:
+ descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
+ descriptor.stencilAttachment.clearStencil = attachmentInfo.clearStencil;
+ break;
+
+ case wgpu::LoadOp::Load:
+ descriptor.stencilAttachment.loadAction = MTLLoadActionLoad;
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
}
}
}
@@ -124,7 +179,7 @@ namespace dawn_native { namespace metal {
// Helper function for Toggle EmulateStoreAndMSAAResolve
void ResolveInAnotherRenderPass(
- id<MTLCommandBuffer> commandBuffer,
+ CommandRecordingContext* commandContext,
const MTLRenderPassDescriptor* mtlRenderPass,
const std::array<id<MTLTexture>, kMaxColorAttachments>& resolveTextures) {
MTLRenderPassDescriptor* mtlRenderPassForResolve =
@@ -146,9 +201,8 @@ namespace dawn_native { namespace metal {
mtlRenderPass.colorAttachments[i].resolveSlice;
}
- id<MTLRenderCommandEncoder> encoder =
- [commandBuffer renderCommandEncoderWithDescriptor:mtlRenderPassForResolve];
- [encoder endEncoding];
+ commandContext->BeginRender(mtlRenderPassForResolve);
+ commandContext->EndRender();
}
// Helper functions for Toggle AlwaysResolveIntoZeroLevelAndLayer
@@ -173,24 +227,22 @@ namespace dawn_native { namespace metal {
return resolveTexture;
}
- void CopyIntoTrueResolveTarget(id<MTLCommandBuffer> commandBuffer,
+ void CopyIntoTrueResolveTarget(CommandRecordingContext* commandContext,
id<MTLTexture> mtlTrueResolveTexture,
uint32_t trueResolveLevel,
uint32_t trueResolveSlice,
id<MTLTexture> temporaryResolveTexture,
uint32_t width,
- uint32_t height,
- GlobalEncoders* encoders) {
- encoders->EnsureBlit(commandBuffer);
- [encoders->blit copyFromTexture:temporaryResolveTexture
- sourceSlice:0
- sourceLevel:0
- sourceOrigin:MTLOriginMake(0, 0, 0)
- sourceSize:MTLSizeMake(width, height, 1)
- toTexture:mtlTrueResolveTexture
- destinationSlice:trueResolveSlice
- destinationLevel:trueResolveLevel
- destinationOrigin:MTLOriginMake(0, 0, 0)];
+ uint32_t height) {
+ [commandContext->EnsureBlit() copyFromTexture:temporaryResolveTexture
+ sourceSlice:0
+ sourceLevel:0
+ sourceOrigin:MTLOriginMake(0, 0, 0)
+ sourceSize:MTLSizeMake(width, height, 1)
+ toTexture:mtlTrueResolveTexture
+ destinationSlice:trueResolveSlice
+ destinationLevel:trueResolveLevel
+ destinationOrigin:MTLOriginMake(0, 0, 0)];
}
// Metal uses a physical addressing mode which means buffers in the shading language are
@@ -390,6 +442,25 @@ namespace dawn_native { namespace metal {
return copy;
}
+ void EnsureSourceTextureInitialized(Texture* texture,
+ const Extent3D& size,
+ const TextureCopy& src) {
+ // TODO(crbug.com/dawn/145): Specify multiple layers based on |size|
+ texture->EnsureSubresourceContentInitialized(src.mipLevel, 1, src.arrayLayer, 1);
+ }
+
+ void EnsureDestinationTextureInitialized(Texture* texture,
+ const Extent3D& size,
+ const TextureCopy& dst) {
+ // TODO(crbug.com/dawn/145): Specify multiple layers based on |size|
+ if (IsCompleteSubresourceCopiedTo(texture, size, dst.mipLevel)) {
+ texture->SetIsSubresourceContentInitialized(true, dst.mipLevel, 1, dst.arrayLayer,
+ 1);
+ } else {
+ texture->EnsureSubresourceContentInitialized(dst.mipLevel, 1, dst.arrayLayer, 1);
+ }
+ }
+
// Keeps track of the dirty bind groups so they can be lazily applied when we know the
// pipeline state.
// Bind groups may be inherited because bind groups are packed in the buffer /
@@ -422,17 +493,22 @@ namespace dawn_native { namespace metal {
uint32_t dynamicOffsetCount,
uint64_t* dynamicOffsets,
PipelineLayout* pipelineLayout) {
- const auto& layout = group->GetLayout()->GetBindingInfo();
uint32_t currentDynamicBufferIndex = 0;
// TODO(kainino@chromium.org): Maintain buffers and offsets arrays in BindGroup
// so that we only have to do one setVertexBuffers and one setFragmentBuffers
// call here.
- for (uint32_t bindingIndex : IterateBitSet(layout.mask)) {
- auto stage = layout.visibilities[bindingIndex];
- bool hasVertStage = stage & wgpu::ShaderStage::Vertex && render != nil;
- bool hasFragStage = stage & wgpu::ShaderStage::Fragment && render != nil;
- bool hasComputeStage = stage & wgpu::ShaderStage::Compute && compute != nil;
+ for (BindingIndex bindingIndex = 0;
+ bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo =
+ group->GetLayout()->GetBindingInfo(bindingIndex);
+
+ bool hasVertStage =
+ bindingInfo.visibility & wgpu::ShaderStage::Vertex && render != nil;
+ bool hasFragStage =
+ bindingInfo.visibility & wgpu::ShaderStage::Fragment && render != nil;
+ bool hasComputeStage =
+ bindingInfo.visibility & wgpu::ShaderStage::Compute && compute != nil;
uint32_t vertIndex = 0;
uint32_t fragIndex = 0;
@@ -451,9 +527,10 @@ namespace dawn_native { namespace metal {
SingleShaderStage::Compute)[index][bindingIndex];
}
- switch (layout.types[bindingIndex]) {
+ switch (bindingInfo.type) {
case wgpu::BindingType::UniformBuffer:
- case wgpu::BindingType::StorageBuffer: {
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer: {
const BufferBinding& binding =
group->GetBindingAsBufferBinding(bindingIndex);
const id<MTLBuffer> buffer = ToBackend(binding.buffer)->GetMTLBuffer();
@@ -461,7 +538,7 @@ namespace dawn_native { namespace metal {
// TODO(shaobo.yan@intel.com): Record bound buffer status to use
// setBufferOffset to achieve better performance.
- if (layout.hasDynamicOffset[bindingIndex]) {
+ if (bindingInfo.hasDynamicOffset) {
offset += dynamicOffsets[currentDynamicBufferIndex];
currentDynamicBufferIndex++;
}
@@ -491,7 +568,8 @@ namespace dawn_native { namespace metal {
withRange:NSMakeRange(computeIndex, 1)];
}
- } break;
+ break;
+ }
case wgpu::BindingType::Sampler: {
auto sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
@@ -507,7 +585,8 @@ namespace dawn_native { namespace metal {
[compute setSamplerState:sampler->GetMTLSamplerState()
atIndex:computeIndex];
}
- } break;
+ break;
+ }
case wgpu::BindingType::SampledTexture: {
auto textureView =
@@ -524,10 +603,12 @@ namespace dawn_native { namespace metal {
[compute setTexture:textureView->GetMTLTexture()
atIndex:computeIndex];
}
- } break;
+ break;
+ }
case wgpu::BindingType::StorageTexture:
- case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
UNREACHABLE();
break;
}
@@ -599,35 +680,63 @@ namespace dawn_native { namespace metal {
FreeCommands(&mCommands);
}
- void CommandBuffer::FillCommands(id<MTLCommandBuffer> commandBuffer) {
- GlobalEncoders encoders;
+ void CommandBuffer::FillCommands(CommandRecordingContext* commandContext) {
+ const std::vector<PassResourceUsage>& passResourceUsages = GetResourceUsages().perPass;
+ size_t nextPassNumber = 0;
+
+ auto LazyClearForPass = [](const PassResourceUsage& usages) {
+ for (size_t i = 0; i < usages.textures.size(); ++i) {
+ Texture* texture = ToBackend(usages.textures[i]);
+ // Clear textures that are not output attachments. Output attachments will be
+ // cleared in CreateMTLRenderPassDescriptor by setting the loadop to clear when the
+ // texture subresource has not been initialized before the render pass.
+ if (!(usages.textureUsages[i] & wgpu::TextureUsage::OutputAttachment)) {
+ texture->EnsureSubresourceContentInitialized(0, texture->GetNumMipLevels(), 0,
+ texture->GetArrayLayers());
+ }
+ }
+ };
Command type;
while (mCommands.NextCommandId(&type)) {
switch (type) {
case Command::BeginComputePass: {
mCommands.NextCommand<BeginComputePassCmd>();
- encoders.Finish();
- EncodeComputePass(commandBuffer);
- } break;
+
+ LazyClearForPass(passResourceUsages[nextPassNumber]);
+ commandContext->EndBlit();
+
+ EncodeComputePass(commandContext);
+
+ nextPassNumber++;
+ break;
+ }
case Command::BeginRenderPass: {
BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
- encoders.Finish();
+
+ LazyClearForPass(passResourceUsages[nextPassNumber]);
+ commandContext->EndBlit();
+
+ LazyClearRenderPassAttachments(cmd);
MTLRenderPassDescriptor* descriptor = CreateMTLRenderPassDescriptor(cmd);
- EncodeRenderPass(commandBuffer, descriptor, &encoders, cmd->width, cmd->height);
- } break;
+ EncodeRenderPass(commandContext, descriptor, cmd->width, cmd->height);
+
+ nextPassNumber++;
+ break;
+ }
case Command::CopyBufferToBuffer: {
CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
- encoders.EnsureBlit(commandBuffer);
- [encoders.blit copyFromBuffer:ToBackend(copy->source)->GetMTLBuffer()
- sourceOffset:copy->sourceOffset
- toBuffer:ToBackend(copy->destination)->GetMTLBuffer()
- destinationOffset:copy->destinationOffset
- size:copy->size];
- } break;
+ [commandContext->EnsureBlit()
+ copyFromBuffer:ToBackend(copy->source)->GetMTLBuffer()
+ sourceOffset:copy->sourceOffset
+ toBuffer:ToBackend(copy->destination)->GetMTLBuffer()
+ destinationOffset:copy->destinationOffset
+ size:copy->size];
+ break;
+ }
case Command::CopyBufferToTexture: {
CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
@@ -637,25 +746,27 @@ namespace dawn_native { namespace metal {
Buffer* buffer = ToBackend(src.buffer.Get());
Texture* texture = ToBackend(dst.texture.Get());
+ EnsureDestinationTextureInitialized(texture, copy->copySize, copy->destination);
+
Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(dst.mipLevel);
TextureBufferCopySplit splittedCopies = ComputeTextureBufferCopySplit(
dst.origin, copySize, texture->GetFormat(), virtualSizeAtLevel,
buffer->GetSize(), src.offset, src.rowPitch, src.imageHeight);
- encoders.EnsureBlit(commandBuffer);
for (uint32_t i = 0; i < splittedCopies.count; ++i) {
const TextureBufferCopySplit::CopyInfo& copyInfo = splittedCopies.copies[i];
- [encoders.blit copyFromBuffer:buffer->GetMTLBuffer()
- sourceOffset:copyInfo.bufferOffset
- sourceBytesPerRow:copyInfo.bytesPerRow
- sourceBytesPerImage:copyInfo.bytesPerImage
- sourceSize:copyInfo.copyExtent
- toTexture:texture->GetMTLTexture()
- destinationSlice:dst.arrayLayer
- destinationLevel:dst.mipLevel
- destinationOrigin:copyInfo.textureOrigin];
+ [commandContext->EnsureBlit() copyFromBuffer:buffer->GetMTLBuffer()
+ sourceOffset:copyInfo.bufferOffset
+ sourceBytesPerRow:copyInfo.bytesPerRow
+ sourceBytesPerImage:copyInfo.bytesPerImage
+ sourceSize:copyInfo.copyExtent
+ toTexture:texture->GetMTLTexture()
+ destinationSlice:dst.arrayLayer
+ destinationLevel:dst.mipLevel
+ destinationOrigin:copyInfo.textureOrigin];
}
- } break;
+ break;
+ }
case Command::CopyTextureToBuffer: {
CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
@@ -665,25 +776,27 @@ namespace dawn_native { namespace metal {
Texture* texture = ToBackend(src.texture.Get());
Buffer* buffer = ToBackend(dst.buffer.Get());
+ EnsureSourceTextureInitialized(texture, copy->copySize, copy->source);
+
Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(src.mipLevel);
TextureBufferCopySplit splittedCopies = ComputeTextureBufferCopySplit(
src.origin, copySize, texture->GetFormat(), virtualSizeAtLevel,
buffer->GetSize(), dst.offset, dst.rowPitch, dst.imageHeight);
- encoders.EnsureBlit(commandBuffer);
for (uint32_t i = 0; i < splittedCopies.count; ++i) {
const TextureBufferCopySplit::CopyInfo& copyInfo = splittedCopies.copies[i];
- [encoders.blit copyFromTexture:texture->GetMTLTexture()
- sourceSlice:src.arrayLayer
- sourceLevel:src.mipLevel
- sourceOrigin:copyInfo.textureOrigin
- sourceSize:copyInfo.copyExtent
- toBuffer:buffer->GetMTLBuffer()
- destinationOffset:copyInfo.bufferOffset
- destinationBytesPerRow:copyInfo.bytesPerRow
- destinationBytesPerImage:copyInfo.bytesPerImage];
+ [commandContext->EnsureBlit() copyFromTexture:texture->GetMTLTexture()
+ sourceSlice:src.arrayLayer
+ sourceLevel:src.mipLevel
+ sourceOrigin:copyInfo.textureOrigin
+ sourceSize:copyInfo.copyExtent
+ toBuffer:buffer->GetMTLBuffer()
+ destinationOffset:copyInfo.bufferOffset
+ destinationBytesPerRow:copyInfo.bytesPerRow
+ destinationBytesPerImage:copyInfo.bytesPerImage];
}
- } break;
+ break;
+ }
case Command::CopyTextureToTexture: {
CopyTextureToTextureCmd* copy =
@@ -691,42 +804,48 @@ namespace dawn_native { namespace metal {
Texture* srcTexture = ToBackend(copy->source.texture.Get());
Texture* dstTexture = ToBackend(copy->destination.texture.Get());
- encoders.EnsureBlit(commandBuffer);
-
- [encoders.blit copyFromTexture:srcTexture->GetMTLTexture()
- sourceSlice:copy->source.arrayLayer
- sourceLevel:copy->source.mipLevel
- sourceOrigin:MakeMTLOrigin(copy->source.origin)
- sourceSize:MakeMTLSize(copy->copySize)
- toTexture:dstTexture->GetMTLTexture()
- destinationSlice:copy->destination.arrayLayer
- destinationLevel:copy->destination.mipLevel
- destinationOrigin:MakeMTLOrigin(copy->destination.origin)];
- } break;
+ EnsureSourceTextureInitialized(srcTexture, copy->copySize, copy->source);
+ EnsureDestinationTextureInitialized(dstTexture, copy->copySize,
+ copy->destination);
+
+ [commandContext->EnsureBlit()
+ copyFromTexture:srcTexture->GetMTLTexture()
+ sourceSlice:copy->source.arrayLayer
+ sourceLevel:copy->source.mipLevel
+ sourceOrigin:MakeMTLOrigin(copy->source.origin)
+ sourceSize:MakeMTLSize(copy->copySize)
+ toTexture:dstTexture->GetMTLTexture()
+ destinationSlice:copy->destination.arrayLayer
+ destinationLevel:copy->destination.mipLevel
+ destinationOrigin:MakeMTLOrigin(copy->destination.origin)];
+ break;
+ }
- default: { UNREACHABLE(); } break;
+ default: {
+ UNREACHABLE();
+ break;
+ }
}
}
- encoders.Finish();
+ commandContext->EndBlit();
}
- void CommandBuffer::EncodeComputePass(id<MTLCommandBuffer> commandBuffer) {
+ void CommandBuffer::EncodeComputePass(CommandRecordingContext* commandContext) {
ComputePipeline* lastPipeline = nullptr;
StorageBufferLengthTracker storageBufferLengths = {};
BindGroupTracker bindGroups(&storageBufferLengths);
- // Will be autoreleased
- id<MTLComputeCommandEncoder> encoder = [commandBuffer computeCommandEncoder];
+ id<MTLComputeCommandEncoder> encoder = commandContext->BeginCompute();
Command type;
while (mCommands.NextCommandId(&type)) {
switch (type) {
case Command::EndComputePass: {
mCommands.NextCommand<EndComputePassCmd>();
- [encoder endEncoding];
+ commandContext->EndCompute();
return;
- } break;
+ }
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
@@ -736,7 +855,8 @@ namespace dawn_native { namespace metal {
[encoder dispatchThreadgroups:MTLSizeMake(dispatch->x, dispatch->y, dispatch->z)
threadsPerThreadgroup:lastPipeline->GetLocalWorkGroupSize()];
- } break;
+ break;
+ }
case Command::DispatchIndirect: {
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
@@ -750,7 +870,8 @@ namespace dawn_native { namespace metal {
indirectBufferOffset:dispatch->indirectOffset
threadsPerThreadgroup:lastPipeline
->GetLocalWorkGroupSize()];
- } break;
+ break;
+ }
case Command::SetComputePipeline: {
SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
@@ -759,7 +880,8 @@ namespace dawn_native { namespace metal {
bindGroups.OnSetPipeline(lastPipeline);
lastPipeline->Encode(encoder);
- } break;
+ break;
+ }
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
@@ -770,7 +892,8 @@ namespace dawn_native { namespace metal {
bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
cmd->dynamicOffsetCount, dynamicOffsets);
- } break;
+ break;
+ }
case Command::InsertDebugMarker: {
InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
@@ -779,13 +902,15 @@ namespace dawn_native { namespace metal {
[encoder insertDebugSignpost:mtlLabel];
[mtlLabel release];
- } break;
+ break;
+ }
case Command::PopDebugGroup: {
mCommands.NextCommand<PopDebugGroupCmd>();
[encoder popDebugGroup];
- } break;
+ break;
+ }
case Command::PushDebugGroup: {
PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
@@ -794,9 +919,13 @@ namespace dawn_native { namespace metal {
[encoder pushDebugGroup:mtlLabel];
[mtlLabel release];
- } break;
+ break;
+ }
- default: { UNREACHABLE(); } break;
+ default: {
+ UNREACHABLE();
+ break;
+ }
}
}
@@ -804,12 +933,11 @@ namespace dawn_native { namespace metal {
UNREACHABLE();
}
- void CommandBuffer::EncodeRenderPass(id<MTLCommandBuffer> commandBuffer,
+ void CommandBuffer::EncodeRenderPass(CommandRecordingContext* commandContext,
MTLRenderPassDescriptor* mtlRenderPass,
- GlobalEncoders* globalEncoders,
uint32_t width,
uint32_t height) {
- ASSERT(mtlRenderPass && globalEncoders);
+ ASSERT(mtlRenderPass);
Device* device = ToBackend(GetDevice());
@@ -852,17 +980,16 @@ namespace dawn_native { namespace metal {
// If we need to use a temporary resolve texture we need to copy the result of MSAA
// resolve back to the true resolve targets.
if (useTemporaryResolveTexture) {
- EncodeRenderPass(commandBuffer, mtlRenderPass, globalEncoders, width, height);
+ EncodeRenderPass(commandContext, mtlRenderPass, width, height);
for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
if (trueResolveTextures[i] == nil) {
continue;
}
ASSERT(temporaryResolveTextures[i] != nil);
- CopyIntoTrueResolveTarget(commandBuffer, trueResolveTextures[i],
+ CopyIntoTrueResolveTarget(commandContext, trueResolveTextures[i],
trueResolveLevels[i], trueResolveSlices[i],
- temporaryResolveTextures[i], width, height,
- globalEncoders);
+ temporaryResolveTextures[i], width, height);
}
return;
}
@@ -876,7 +1003,7 @@ namespace dawn_native { namespace metal {
std::array<id<MTLTexture>, kMaxColorAttachments> resolveTextures = {};
for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
if (mtlRenderPass.colorAttachments[i].storeAction ==
- MTLStoreActionStoreAndMultisampleResolve) {
+ kMTLStoreActionStoreAndMultisampleResolve) {
hasStoreAndMSAAResolve = true;
resolveTextures[i] = mtlRenderPass.colorAttachments[i].resolveTexture;
@@ -887,16 +1014,16 @@ namespace dawn_native { namespace metal {
// If we found a store + MSAA resolve we need to resolve in a different render pass.
if (hasStoreAndMSAAResolve) {
- EncodeRenderPass(commandBuffer, mtlRenderPass, globalEncoders, width, height);
- ResolveInAnotherRenderPass(commandBuffer, mtlRenderPass, resolveTextures);
+ EncodeRenderPass(commandContext, mtlRenderPass, width, height);
+ ResolveInAnotherRenderPass(commandContext, mtlRenderPass, resolveTextures);
return;
}
}
- EncodeRenderPassInternal(commandBuffer, mtlRenderPass, width, height);
+ EncodeRenderPassInternal(commandContext, mtlRenderPass, width, height);
}
- void CommandBuffer::EncodeRenderPassInternal(id<MTLCommandBuffer> commandBuffer,
+ void CommandBuffer::EncodeRenderPassInternal(CommandRecordingContext* commandContext,
MTLRenderPassDescriptor* mtlRenderPass,
uint32_t width,
uint32_t height) {
@@ -907,9 +1034,7 @@ namespace dawn_native { namespace metal {
StorageBufferLengthTracker storageBufferLengths = {};
BindGroupTracker bindGroups(&storageBufferLengths);
- // This will be autoreleased
- id<MTLRenderCommandEncoder> encoder =
- [commandBuffer renderCommandEncoderWithDescriptor:mtlRenderPass];
+ id<MTLRenderCommandEncoder> encoder = commandContext->BeginRender(mtlRenderPass);
auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
switch (type) {
@@ -922,13 +1047,22 @@ namespace dawn_native { namespace metal {
// The instance count must be non-zero, otherwise no-op
if (draw->instanceCount != 0) {
- [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
- vertexStart:draw->firstVertex
- vertexCount:draw->vertexCount
- instanceCount:draw->instanceCount
- baseInstance:draw->firstInstance];
+ // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance
+ if (draw->firstInstance == 0) {
+ [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+ vertexStart:draw->firstVertex
+ vertexCount:draw->vertexCount
+ instanceCount:draw->instanceCount];
+ } else {
+ [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+ vertexStart:draw->firstVertex
+ vertexCount:draw->vertexCount
+ instanceCount:draw->instanceCount
+ baseInstance:draw->firstInstance];
+ }
}
- } break;
+ break;
+ }
case Command::DrawIndexed: {
DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
@@ -941,17 +1075,30 @@ namespace dawn_native { namespace metal {
// The index and instance count must be non-zero, otherwise no-op
if (draw->indexCount != 0 && draw->instanceCount != 0) {
- [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
- indexCount:draw->indexCount
- indexType:lastPipeline->GetMTLIndexType()
- indexBuffer:indexBuffer
- indexBufferOffset:indexBufferBaseOffset +
- draw->firstIndex * formatSize
- instanceCount:draw->instanceCount
- baseVertex:draw->baseVertex
- baseInstance:draw->firstInstance];
+ // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance and
+ // baseVertex.
+ if (draw->baseVertex == 0 && draw->firstInstance == 0) {
+ [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+ indexCount:draw->indexCount
+ indexType:lastPipeline->GetMTLIndexType()
+ indexBuffer:indexBuffer
+ indexBufferOffset:indexBufferBaseOffset +
+ draw->firstIndex * formatSize
+ instanceCount:draw->instanceCount];
+ } else {
+ [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+ indexCount:draw->indexCount
+ indexType:lastPipeline->GetMTLIndexType()
+ indexBuffer:indexBuffer
+ indexBufferOffset:indexBufferBaseOffset +
+ draw->firstIndex * formatSize
+ instanceCount:draw->instanceCount
+ baseVertex:draw->baseVertex
+ baseInstance:draw->firstInstance];
+ }
}
- } break;
+ break;
+ }
case Command::DrawIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
@@ -965,7 +1112,8 @@ namespace dawn_native { namespace metal {
[encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
indirectBuffer:indirectBuffer
indirectBufferOffset:draw->indirectOffset];
- } break;
+ break;
+ }
case Command::DrawIndexedIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
@@ -982,7 +1130,8 @@ namespace dawn_native { namespace metal {
indexBufferOffset:indexBufferBaseOffset
indirectBuffer:indirectBuffer
indirectBufferOffset:draw->indirectOffset];
- } break;
+ break;
+ }
case Command::InsertDebugMarker: {
InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
@@ -991,13 +1140,15 @@ namespace dawn_native { namespace metal {
[encoder insertDebugSignpost:mtlLabel];
[mtlLabel release];
- } break;
+ break;
+ }
case Command::PopDebugGroup: {
iter->NextCommand<PopDebugGroupCmd>();
[encoder popDebugGroup];
- } break;
+ break;
+ }
case Command::PushDebugGroup: {
PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
@@ -1006,7 +1157,8 @@ namespace dawn_native { namespace metal {
[encoder pushDebugGroup:mtlLabel];
[mtlLabel release];
- } break;
+ break;
+ }
case Command::SetRenderPipeline: {
SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
@@ -1021,7 +1173,8 @@ namespace dawn_native { namespace metal {
newPipeline->Encode(encoder);
lastPipeline = newPipeline;
- } break;
+ break;
+ }
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
@@ -1032,21 +1185,24 @@ namespace dawn_native { namespace metal {
bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
cmd->dynamicOffsetCount, dynamicOffsets);
- } break;
+ break;
+ }
case Command::SetIndexBuffer: {
SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
auto b = ToBackend(cmd->buffer.Get());
indexBuffer = b->GetMTLBuffer();
indexBufferBaseOffset = cmd->offset;
- } break;
+ break;
+ }
case Command::SetVertexBuffer: {
SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
vertexBuffers.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
cmd->offset);
- } break;
+ break;
+ }
default:
UNREACHABLE();
@@ -1059,14 +1215,15 @@ namespace dawn_native { namespace metal {
switch (type) {
case Command::EndRenderPass: {
mCommands.NextCommand<EndRenderPassCmd>();
- [encoder endEncoding];
+ commandContext->EndRender();
return;
- } break;
+ }
case Command::SetStencilReference: {
SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
[encoder setStencilReferenceValue:cmd->reference];
- } break;
+ break;
+ }
case Command::SetViewport: {
SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
@@ -1079,7 +1236,8 @@ namespace dawn_native { namespace metal {
viewport.zfar = cmd->maxDepth;
[encoder setViewport:viewport];
- } break;
+ break;
+ }
case Command::SetScissorRect: {
SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
@@ -1099,7 +1257,8 @@ namespace dawn_native { namespace metal {
}
[encoder setScissorRect:rect];
- } break;
+ break;
+ }
case Command::SetBlendColor: {
SetBlendColorCmd* cmd = mCommands.NextCommand<SetBlendColorCmd>();
@@ -1107,7 +1266,8 @@ namespace dawn_native { namespace metal {
green:cmd->color.g
blue:cmd->color.b
alpha:cmd->color.a];
- } break;
+ break;
+ }
case Command::ExecuteBundles: {
ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
@@ -1120,9 +1280,13 @@ namespace dawn_native { namespace metal {
EncodeRenderBundleCommand(iter, type);
}
}
- } break;
+ break;
+ }
- default: { EncodeRenderBundleCommand(&mCommands, type); } break;
+ default: {
+ EncodeRenderBundleCommand(&mCommands, type);
+ break;
+ }
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.h
new file mode 100644
index 00000000000..531681b4bac
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.h
@@ -0,0 +1,59 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#ifndef DAWNNATIVE_METAL_COMMANDRECORDINGCONTEXT_H_
+#define DAWNNATIVE_METAL_COMMANDRECORDINGCONTEXT_H_
+
+#import <Metal/Metal.h>
+
+namespace dawn_native { namespace metal {
+
+ // This class wraps a MTLCommandBuffer and tracks which Metal encoder is open.
+ // Only one encoder may be open at a time.
+ class CommandRecordingContext {
+ public:
+ CommandRecordingContext();
+ CommandRecordingContext(id<MTLCommandBuffer> commands);
+
+ CommandRecordingContext(const CommandRecordingContext& rhs) = delete;
+ CommandRecordingContext& operator=(const CommandRecordingContext& rhs) = delete;
+
+ CommandRecordingContext(CommandRecordingContext&& rhs);
+ CommandRecordingContext& operator=(CommandRecordingContext&& rhs);
+
+ ~CommandRecordingContext();
+
+ id<MTLCommandBuffer> GetCommands();
+
+ id<MTLCommandBuffer> AcquireCommands();
+
+ id<MTLBlitCommandEncoder> EnsureBlit();
+ void EndBlit();
+
+ id<MTLComputeCommandEncoder> BeginCompute();
+ void EndCompute();
+
+ id<MTLRenderCommandEncoder> BeginRender(MTLRenderPassDescriptor* descriptor);
+ void EndRender();
+
+ private:
+ id<MTLCommandBuffer> mCommands = nil;
+ id<MTLBlitCommandEncoder> mBlit = nil;
+ id<MTLComputeCommandEncoder> mCompute = nil;
+ id<MTLRenderCommandEncoder> mRender = nil;
+ bool mInEncoder = false;
+ };
+
+}} // namespace dawn_native::metal
+
+#endif // DAWNNATIVE_METAL_COMMANDRECORDINGCONTEXT_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.mm b/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.mm
new file mode 100644
index 00000000000..3ede6268beb
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandRecordingContext.mm
@@ -0,0 +1,119 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/metal/CommandRecordingContext.h"
+
+#include "common/Assert.h"
+
+namespace dawn_native { namespace metal {
+
+ CommandRecordingContext::CommandRecordingContext() = default;
+
+ CommandRecordingContext::CommandRecordingContext(id<MTLCommandBuffer> commands)
+ : mCommands(commands) {
+ }
+
+ CommandRecordingContext::CommandRecordingContext(CommandRecordingContext&& rhs)
+ : mCommands(rhs.AcquireCommands()) {
+ }
+
+ CommandRecordingContext& CommandRecordingContext::operator=(CommandRecordingContext&& rhs) {
+ mCommands = rhs.AcquireCommands();
+ return *this;
+ }
+
+ CommandRecordingContext::~CommandRecordingContext() {
+ // Commands must be acquired.
+ ASSERT(mCommands == nil);
+ }
+
+ id<MTLCommandBuffer> CommandRecordingContext::GetCommands() {
+ return mCommands;
+ }
+
+ id<MTLCommandBuffer> CommandRecordingContext::AcquireCommands() {
+ ASSERT(!mInEncoder);
+
+ id<MTLCommandBuffer> commands = mCommands;
+ mCommands = nil;
+ return commands;
+ }
+
+ id<MTLBlitCommandEncoder> CommandRecordingContext::EnsureBlit() {
+ ASSERT(mCommands != nil);
+
+ if (mBlit == nil) {
+ ASSERT(!mInEncoder);
+ mInEncoder = true;
+ // The autorelease pool may drain before the encoder is ended. Retain so it stays alive.
+ mBlit = [[mCommands blitCommandEncoder] retain];
+ }
+ return mBlit;
+ }
+
+ void CommandRecordingContext::EndBlit() {
+ ASSERT(mCommands != nil);
+
+ if (mBlit != nil) {
+ [mBlit endEncoding];
+ [mBlit release];
+ mBlit = nil;
+ mInEncoder = false;
+ }
+ }
+
+ id<MTLComputeCommandEncoder> CommandRecordingContext::BeginCompute() {
+ ASSERT(mCommands != nil);
+ ASSERT(mCompute == nil);
+ ASSERT(!mInEncoder);
+
+ mInEncoder = true;
+ // The autorelease pool may drain before the encoder is ended. Retain so it stays alive.
+ mCompute = [[mCommands computeCommandEncoder] retain];
+ return mCompute;
+ }
+
+ void CommandRecordingContext::EndCompute() {
+ ASSERT(mCommands != nil);
+ ASSERT(mCompute != nil);
+
+ [mCompute endEncoding];
+ [mCompute release];
+ mCompute = nil;
+ mInEncoder = false;
+ }
+
+ id<MTLRenderCommandEncoder> CommandRecordingContext::BeginRender(
+ MTLRenderPassDescriptor* descriptor) {
+ ASSERT(mCommands != nil);
+ ASSERT(mRender == nil);
+ ASSERT(!mInEncoder);
+
+ mInEncoder = true;
+ // The autorelease pool may drain before the encoder is ended. Retain so it stays alive.
+ mRender = [[mCommands renderCommandEncoderWithDescriptor:descriptor] retain];
+ return mRender;
+ }
+
+ void CommandRecordingContext::EndRender() {
+ ASSERT(mCommands != nil);
+ ASSERT(mRender != nil);
+
+ [mRender endEncoding];
+ [mRender release];
+ mRender = nil;
+ mInEncoder = false;
+ }
+
+}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h
index 71b5ba36bcf..6ff1d01cd03 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.h
@@ -25,7 +25,8 @@ namespace dawn_native { namespace metal {
class ComputePipeline : public ComputePipelineBase {
public:
- ComputePipeline(Device* device, const ComputePipelineDescriptor* descriptor);
+ static ResultOrError<ComputePipeline*> Create(Device* device,
+ const ComputePipelineDescriptor* descriptor);
~ComputePipeline();
void Encode(id<MTLComputeCommandEncoder> encoder);
@@ -33,6 +34,9 @@ namespace dawn_native { namespace metal {
bool RequiresStorageBufferLength() const;
private:
+ using ComputePipelineBase::ComputePipelineBase;
+ MaybeError Initialize(const ComputePipelineDescriptor* descriptor);
+
id<MTLComputePipelineState> mMtlComputePipelineState = nil;
MTLSize mLocalWorkgroupSize;
bool mRequiresStorageBufferLength;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm
index fd723643e91..e9252af5df1 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm
@@ -19,27 +19,37 @@
namespace dawn_native { namespace metal {
- ComputePipeline::ComputePipeline(Device* device, const ComputePipelineDescriptor* descriptor)
- : ComputePipelineBase(device, descriptor) {
+ // static
+ ResultOrError<ComputePipeline*> ComputePipeline::Create(
+ Device* device,
+ const ComputePipelineDescriptor* descriptor) {
+ std::unique_ptr<ComputePipeline> pipeline =
+ std::make_unique<ComputePipeline>(device, descriptor);
+ DAWN_TRY(pipeline->Initialize(descriptor));
+ return pipeline.release();
+ }
+
+ MaybeError ComputePipeline::Initialize(const ComputePipelineDescriptor* descriptor) {
auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
- const ShaderModule* computeModule = ToBackend(descriptor->computeStage.module);
+ ShaderModule* computeModule = ToBackend(descriptor->computeStage.module);
const char* computeEntryPoint = descriptor->computeStage.entryPoint;
- ShaderModule::MetalFunctionData computeData = computeModule->GetFunction(
- computeEntryPoint, SingleShaderStage::Compute, ToBackend(GetLayout()));
+ ShaderModule::MetalFunctionData computeData;
+ DAWN_TRY(computeModule->GetFunction(computeEntryPoint, SingleShaderStage::Compute,
+ ToBackend(GetLayout()), &computeData));
NSError* error = nil;
mMtlComputePipelineState =
[mtlDevice newComputePipelineStateWithFunction:computeData.function error:&error];
if (error != nil) {
NSLog(@" error => %@", error);
- GetDevice()->HandleError(wgpu::ErrorType::DeviceLost, "Error creating pipeline state");
- return;
+ return DAWN_INTERNAL_ERROR("Error creating pipeline state");
}
// Copy over the local workgroup size as it is passed to dispatch explicitly in Metal
mLocalWorkgroupSize = computeData.localWorkgroupSize;
mRequiresStorageBufferLength = computeData.needsStorageBufferLength;
+ return {};
}
ComputePipeline::~ComputePipeline() {
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
index 4424dc89104..6fa5b72c63d 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
@@ -19,6 +19,7 @@
#include "common/Serial.h"
#include "dawn_native/Device.h"
+#include "dawn_native/metal/CommandRecordingContext.h"
#include "dawn_native/metal/Forward.h"
#import <IOSurface/IOSurfaceRef.h>
@@ -48,13 +49,13 @@ namespace dawn_native { namespace metal {
id<MTLDevice> GetMTLDevice();
id<MTLCommandQueue> GetMTLQueue();
- id<MTLCommandBuffer> GetPendingCommandBuffer();
+ CommandRecordingContext* GetPendingCommandContext();
Serial GetPendingCommandSerial() const override;
void SubmitPendingCommandBuffer();
MapRequestTracker* GetMapTracker() const;
- TextureBase* CreateTextureWrappingIOSurface(const TextureDescriptor* descriptor,
+ TextureBase* CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
IOSurfaceRef ioSurface,
uint32_t plane);
void WaitForCommandsToBeScheduled();
@@ -84,19 +85,25 @@ namespace dawn_native { namespace metal {
const ShaderModuleDescriptor* descriptor) override;
ResultOrError<SwapChainBase*> CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) override;
+ ResultOrError<NewSwapChainBase*> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) override;
ResultOrError<TextureBase*> CreateTextureImpl(const TextureDescriptor* descriptor) override;
ResultOrError<TextureViewBase*> CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) override;
void InitTogglesFromDriver();
+ void Destroy() override;
+ MaybeError WaitForIdleForDestruction() override;
id<MTLDevice> mMtlDevice = nil;
id<MTLCommandQueue> mCommandQueue = nil;
std::unique_ptr<MapRequestTracker> mMapTracker;
Serial mLastSubmittedSerial = 0;
- id<MTLCommandBuffer> mPendingCommands = nil;
+ CommandRecordingContext mCommandContext;
// The completed serial is updated in a Metal completion handler that can be fired on a
// different thread, so it needs to be atomic.
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
index 504e6b3ade8..12ddac76054 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
@@ -15,9 +15,11 @@
#include "dawn_native/metal/DeviceMTL.h"
#include "dawn_native/BackendConnection.h"
-#include "dawn_native/BindGroup.h"
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/DynamicUploader.h"
+#include "dawn_native/ErrorData.h"
+#include "dawn_native/metal/BindGroupLayoutMTL.h"
+#include "dawn_native/metal/BindGroupMTL.h"
#include "dawn_native/metal/BufferMTL.h"
#include "dawn_native/metal/CommandBufferMTL.h"
#include "dawn_native/metal/ComputePipelineMTL.h"
@@ -53,27 +55,7 @@ namespace dawn_native { namespace metal {
}
Device::~Device() {
- // Wait for all commands to be finished so we can free resources SubmitPendingCommandBuffer
- // may not increment the pendingCommandSerial if there are no pending commands, so we can't
- // store the pendingSerial before SubmitPendingCommandBuffer then wait for it to be passed.
- // Instead we submit and wait for the serial before the next pendingCommandSerial.
- SubmitPendingCommandBuffer();
- while (GetCompletedCommandSerial() != mLastSubmittedSerial) {
- usleep(100);
- }
- Tick();
-
- [mPendingCommands release];
- mPendingCommands = nil;
-
- mMapTracker = nullptr;
- mDynamicUploader = nullptr;
-
- [mCommandQueue release];
- mCommandQueue = nil;
-
- [mMtlDevice release];
- mMtlDevice = nil;
+ BaseDestructor();
}
void Device::InitTogglesFromDriver() {
@@ -88,6 +70,22 @@ namespace dawn_native { namespace metal {
#endif
// On tvOS, we would need MTLFeatureSet_tvOS_GPUFamily2_v1.
SetToggle(Toggle::EmulateStoreAndMSAAResolve, !haveStoreAndMSAAResolve);
+
+ bool haveSamplerCompare = true;
+#if defined(DAWN_PLATFORM_IOS)
+ haveSamplerCompare = [mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
+#endif
+ // TODO(crbug.com/dawn/342): Investigate emulation -- possibly expensive.
+ SetToggle(Toggle::MetalDisableSamplerCompare, !haveSamplerCompare);
+
+ bool haveBaseVertexBaseInstance = true;
+#if defined(DAWN_PLATFORM_IOS)
+ haveBaseVertexBaseInstance =
+ [mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
+#endif
+ // TODO(crbug.com/dawn/343): Investigate emulation.
+ SetToggle(Toggle::DisableBaseVertex, !haveBaseVertexBaseInstance);
+ SetToggle(Toggle::DisableBaseInstance, !haveBaseVertexBaseInstance);
}
// TODO(jiawei.shao@intel.com): tighten this workaround when the driver bug is fixed.
@@ -96,7 +94,7 @@ namespace dawn_native { namespace metal {
ResultOrError<BindGroupBase*> Device::CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) {
- return new BindGroup(this, descriptor);
+ return BindGroup::Create(this, descriptor);
}
ResultOrError<BindGroupLayoutBase*> Device::CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) {
@@ -111,7 +109,7 @@ namespace dawn_native { namespace metal {
}
ResultOrError<ComputePipelineBase*> Device::CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) {
- return new ComputePipeline(this, descriptor);
+ return ComputePipeline::Create(this, descriptor);
}
ResultOrError<PipelineLayoutBase*> Device::CreatePipelineLayoutImpl(
const PipelineLayoutDescriptor* descriptor) {
@@ -122,10 +120,10 @@ namespace dawn_native { namespace metal {
}
ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
- return new RenderPipeline(this, descriptor);
+ return RenderPipeline::Create(this, descriptor);
}
ResultOrError<SamplerBase*> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
- return new Sampler(this, descriptor);
+ return Sampler::Create(this, descriptor);
}
ResultOrError<ShaderModuleBase*> Device::CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor) {
@@ -133,7 +131,13 @@ namespace dawn_native { namespace metal {
}
ResultOrError<SwapChainBase*> Device::CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) {
- return new SwapChain(this, descriptor);
+ return new OldSwapChain(this, descriptor);
+ }
+ ResultOrError<NewSwapChainBase*> Device::CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ return new SwapChain(this, surface, previousSwapChain, descriptor);
}
ResultOrError<TextureBase*> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
return new Texture(this, descriptor);
@@ -163,7 +167,7 @@ namespace dawn_native { namespace metal {
mDynamicUploader->Deallocate(completedSerial);
mMapTracker->Tick(completedSerial);
- if (mPendingCommands != nil) {
+ if (mCommandContext.GetCommands() != nil) {
SubmitPendingCommandBuffer();
} else if (completedSerial == mLastSubmittedSerial) {
// If there's no GPU work in flight we still need to artificially increment the serial
@@ -183,46 +187,43 @@ namespace dawn_native { namespace metal {
return mCommandQueue;
}
- id<MTLCommandBuffer> Device::GetPendingCommandBuffer() {
- TRACE_EVENT0(GetPlatform(), General, "DeviceMTL::GetPendingCommandBuffer");
- if (mPendingCommands == nil) {
- mPendingCommands = [mCommandQueue commandBuffer];
- [mPendingCommands retain];
+ CommandRecordingContext* Device::GetPendingCommandContext() {
+ if (mCommandContext.GetCommands() == nil) {
+ TRACE_EVENT0(GetPlatform(), General, "[MTLCommandQueue commandBuffer]");
+ // The MTLCommandBuffer will be autoreleased by default.
+ // The autorelease pool may drain before the command buffer is submitted. Retain so it
+ // stays alive.
+ mCommandContext = CommandRecordingContext([[mCommandQueue commandBuffer] retain]);
}
- return mPendingCommands;
+ return &mCommandContext;
}
void Device::SubmitPendingCommandBuffer() {
- if (mPendingCommands == nil) {
+ if (mCommandContext.GetCommands() == nil) {
return;
}
mLastSubmittedSerial++;
+ // Ensure the blit encoder is ended. It may have been opened to perform a lazy clear or
+ // buffer upload.
+ mCommandContext.EndBlit();
+
+ // Acquire the pending command buffer, which is retained. It must be released later.
+ id<MTLCommandBuffer> pendingCommands = mCommandContext.AcquireCommands();
+
// Replace mLastSubmittedCommands with the mutex held so we avoid races between the
// schedule handler and this code.
{
std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
- [mLastSubmittedCommands release];
- mLastSubmittedCommands = mPendingCommands;
+ mLastSubmittedCommands = pendingCommands;
}
- // Ok, ObjC blocks are weird. My understanding is that local variables are captured by
- // value so this-> works as expected. However it is unclear how members are captured, (are
- // they captured using this-> or by value?). To be safe we copy members to local variables
- // to ensure they are captured "by value".
-
- // Free mLastSubmittedCommands as soon as it is scheduled so that it doesn't hold
- // references to its resources. Make a local copy of pendingCommands first so it is
- // captured "by-value" by the block.
- id<MTLCommandBuffer> pendingCommands = mPendingCommands;
-
- [mPendingCommands addScheduledHandler:^(id<MTLCommandBuffer>) {
+ [pendingCommands addScheduledHandler:^(id<MTLCommandBuffer>) {
// This is DRF because we hold the mutex for mLastSubmittedCommands and pendingCommands
// is a local value (and not the member itself).
std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
if (this->mLastSubmittedCommands == pendingCommands) {
- [this->mLastSubmittedCommands release];
this->mLastSubmittedCommands = nil;
}
}];
@@ -230,7 +231,7 @@ namespace dawn_native { namespace metal {
// Update the completed serial once the completed handler is fired. Make a local copy of
// mLastSubmittedSerial so it is captured by value.
Serial pendingSerial = mLastSubmittedSerial;
- [mPendingCommands addCompletedHandler:^(id<MTLCommandBuffer>) {
+ [pendingCommands addCompletedHandler:^(id<MTLCommandBuffer>) {
TRACE_EVENT_ASYNC_END0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
pendingSerial);
ASSERT(pendingSerial > mCompletedSerial.load());
@@ -239,8 +240,8 @@ namespace dawn_native { namespace metal {
TRACE_EVENT_ASYNC_BEGIN0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
pendingSerial);
- [mPendingCommands commit];
- mPendingCommands = nil;
+ [pendingCommands commit];
+ [pendingCommands release];
}
MapRequestTracker* Device::GetMapTracker() const {
@@ -261,25 +262,24 @@ namespace dawn_native { namespace metal {
uint64_t size) {
id<MTLBuffer> uploadBuffer = ToBackend(source)->GetBufferHandle();
id<MTLBuffer> buffer = ToBackend(destination)->GetMTLBuffer();
- id<MTLCommandBuffer> commandBuffer = GetPendingCommandBuffer();
- id<MTLBlitCommandEncoder> encoder = [commandBuffer blitCommandEncoder];
- [encoder copyFromBuffer:uploadBuffer
- sourceOffset:sourceOffset
- toBuffer:buffer
- destinationOffset:destinationOffset
- size:size];
- [encoder endEncoding];
-
+ [GetPendingCommandContext()->EnsureBlit() copyFromBuffer:uploadBuffer
+ sourceOffset:sourceOffset
+ toBuffer:buffer
+ destinationOffset:destinationOffset
+ size:size];
return {};
}
- TextureBase* Device::CreateTextureWrappingIOSurface(const TextureDescriptor* descriptor,
+ TextureBase* Device::CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
IOSurfaceRef ioSurface,
uint32_t plane) {
- if (ConsumedError(ValidateTextureDescriptor(this, descriptor))) {
+ const TextureDescriptor* textureDescriptor =
+ reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
+ if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
return nullptr;
}
- if (ConsumedError(ValidateIOSurfaceCanBeWrapped(this, descriptor, ioSurface, plane))) {
+ if (ConsumedError(
+ ValidateIOSurfaceCanBeWrapped(this, textureDescriptor, ioSurface, plane))) {
return nullptr;
}
@@ -291,4 +291,35 @@ namespace dawn_native { namespace metal {
[mLastSubmittedCommands waitUntilScheduled];
}
+ MaybeError Device::WaitForIdleForDestruction() {
+ [mCommandContext.AcquireCommands() release];
+
+ // Wait for all commands to be finished so we can free resources
+ while (GetCompletedCommandSerial() != mLastSubmittedSerial) {
+ usleep(100);
+ }
+
+ // Artificially increase the serials so work that was pending knows it can complete.
+ mCompletedSerial++;
+ mLastSubmittedSerial++;
+
+ DAWN_TRY(TickImpl());
+ return {};
+ }
+
+ void Device::Destroy() {
+ ASSERT(mLossStatus != LossStatus::AlreadyLost);
+
+ [mCommandContext.AcquireCommands() release];
+
+ mMapTracker = nullptr;
+ mDynamicUploader = nullptr;
+
+ [mCommandQueue release];
+ mCommandQueue = nil;
+
+ [mMtlDevice release];
+ mMtlDevice = nil;
+ }
+
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/Forward.h b/chromium/third_party/dawn/src/dawn_native/metal/Forward.h
index 4e889cddec5..a773a182c89 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/Forward.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/Forward.h
@@ -17,16 +17,11 @@
#include "dawn_native/ToBackend.h"
-namespace {
- class BindGroupBase;
- class BindGroup;
-} // namespace
-
namespace dawn_native { namespace metal {
class Adapter;
- using BindGroup = BindGroupBase;
- using BindGroupLayout = BindGroupLayoutBase;
+ class BindGroup;
+ class BindGroupLayout;
class Buffer;
class CommandBuffer;
class ComputePipeline;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/MetalBackend.mm b/chromium/third_party/dawn/src/dawn_native/metal/MetalBackend.mm
index 22b583af18c..24c44810e91 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/MetalBackend.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/MetalBackend.mm
@@ -27,14 +27,15 @@ namespace dawn_native { namespace metal {
return device->GetMTLDevice();
}
+ ExternalImageDescriptorIOSurface::ExternalImageDescriptorIOSurface()
+ : ExternalImageDescriptor(ExternalImageDescriptorType::IOSurface) {
+ }
+
WGPUTexture WrapIOSurface(WGPUDevice cDevice,
- const WGPUTextureDescriptor* cDescriptor,
- IOSurfaceRef ioSurface,
- uint32_t plane) {
+ const ExternalImageDescriptorIOSurface* cDescriptor) {
Device* device = reinterpret_cast<Device*>(cDevice);
- const TextureDescriptor* descriptor =
- reinterpret_cast<const TextureDescriptor*>(cDescriptor);
- TextureBase* texture = device->CreateTextureWrappingIOSurface(descriptor, ioSurface, plane);
+ TextureBase* texture = device->CreateTextureWrappingIOSurface(
+ cDescriptor, cDescriptor->ioSurface, cDescriptor->plane);
return reinterpret_cast<WGPUTexture>(texture);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
index dc528915c35..c98d5768698 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
@@ -29,31 +29,32 @@ namespace dawn_native { namespace metal {
uint32_t textureIndex = 0;
for (uint32_t group : IterateBitSet(GetBindGroupLayoutsMask())) {
- const auto& groupInfo = GetBindGroupLayout(group)->GetBindingInfo();
- for (size_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
- if (!(groupInfo.visibilities[binding] & StageBit(stage))) {
- continue;
- }
- if (!groupInfo.mask[binding]) {
+ for (BindingIndex bindingIndex = 0;
+ bindingIndex < GetBindGroupLayout(group)->GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo =
+ GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
+ if (!(bindingInfo.visibility & StageBit(stage))) {
continue;
}
- switch (groupInfo.types[binding]) {
+ switch (bindingInfo.type) {
case wgpu::BindingType::UniformBuffer:
case wgpu::BindingType::StorageBuffer:
- mIndexInfo[stage][group][binding] = bufferIndex;
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ mIndexInfo[stage][group][bindingIndex] = bufferIndex;
bufferIndex++;
break;
case wgpu::BindingType::Sampler:
- mIndexInfo[stage][group][binding] = samplerIndex;
+ mIndexInfo[stage][group][bindingIndex] = samplerIndex;
samplerIndex++;
break;
case wgpu::BindingType::SampledTexture:
- mIndexInfo[stage][group][binding] = textureIndex;
+ mIndexInfo[stage][group][bindingIndex] = textureIndex;
textureIndex++;
break;
case wgpu::BindingType::StorageTexture:
- case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
UNREACHABLE();
break;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
index dd360e970b0..7c5967ad81d 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
@@ -27,11 +27,11 @@ namespace dawn_native { namespace metal {
MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
Device* device = ToBackend(GetDevice());
device->Tick();
- id<MTLCommandBuffer> commandBuffer = device->GetPendingCommandBuffer();
+ CommandRecordingContext* commandContext = device->GetPendingCommandContext();
TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
for (uint32_t i = 0; i < commandCount; ++i) {
- ToBackend(commands[i])->FillCommands(commandBuffer);
+ ToBackend(commands[i])->FillCommands(commandContext);
}
TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h
index bce358b92c8..47fc0489abc 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h
@@ -25,7 +25,8 @@ namespace dawn_native { namespace metal {
class RenderPipeline : public RenderPipelineBase {
public:
- RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor);
+ static ResultOrError<RenderPipeline*> Create(Device* device,
+ const RenderPipelineDescriptor* descriptor);
~RenderPipeline();
MTLIndexType GetMTLIndexType() const;
@@ -44,6 +45,9 @@ namespace dawn_native { namespace metal {
wgpu::ShaderStage GetStagesRequiringStorageBufferLength() const;
private:
+ using RenderPipelineBase::RenderPipelineBase;
+ MaybeError Initialize(const RenderPipelineDescriptor* descriptor);
+
MTLVertexDescriptor* MakeVertexDesc();
MTLIndexType mMtlIndexType;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
index 3c4d8523f6c..575140f86df 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
@@ -311,39 +311,58 @@ namespace dawn_native { namespace metal {
} // anonymous namespace
- RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor)
- : RenderPipelineBase(device, descriptor),
- mMtlIndexType(MTLIndexFormat(GetVertexStateDescriptor()->indexFormat)),
- mMtlPrimitiveTopology(MTLPrimitiveTopology(GetPrimitiveTopology())),
- mMtlFrontFace(MTLFrontFace(GetFrontFace())),
- mMtlCullMode(ToMTLCullMode(GetCullMode())) {
- auto mtlDevice = device->GetMTLDevice();
+ // static
+ ResultOrError<RenderPipeline*> RenderPipeline::Create(
+ Device* device,
+ const RenderPipelineDescriptor* descriptor) {
+ std::unique_ptr<RenderPipeline> pipeline =
+ std::make_unique<RenderPipeline>(device, descriptor);
+ DAWN_TRY(pipeline->Initialize(descriptor));
+ return pipeline.release();
+ }
+
+ MaybeError RenderPipeline::Initialize(const RenderPipelineDescriptor* descriptor) {
+ mMtlIndexType = MTLIndexFormat(GetVertexStateDescriptor()->indexFormat);
+ mMtlPrimitiveTopology = MTLPrimitiveTopology(GetPrimitiveTopology());
+ mMtlFrontFace = MTLFrontFace(GetFrontFace());
+ mMtlCullMode = ToMTLCullMode(GetCullMode());
+ auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
MTLRenderPipelineDescriptor* descriptorMTL = [MTLRenderPipelineDescriptor new];
- const ShaderModule* vertexModule = ToBackend(descriptor->vertexStage.module);
+ ShaderModule* vertexModule = ToBackend(descriptor->vertexStage.module);
const char* vertexEntryPoint = descriptor->vertexStage.entryPoint;
- ShaderModule::MetalFunctionData vertexData = vertexModule->GetFunction(
- vertexEntryPoint, SingleShaderStage::Vertex, ToBackend(GetLayout()));
+ ShaderModule::MetalFunctionData vertexData;
+ DAWN_TRY(vertexModule->GetFunction(vertexEntryPoint, SingleShaderStage::Vertex,
+ ToBackend(GetLayout()), &vertexData));
+
descriptorMTL.vertexFunction = vertexData.function;
if (vertexData.needsStorageBufferLength) {
mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Vertex;
}
- const ShaderModule* fragmentModule = ToBackend(descriptor->fragmentStage->module);
+ ShaderModule* fragmentModule = ToBackend(descriptor->fragmentStage->module);
const char* fragmentEntryPoint = descriptor->fragmentStage->entryPoint;
- ShaderModule::MetalFunctionData fragmentData = fragmentModule->GetFunction(
- fragmentEntryPoint, SingleShaderStage::Fragment, ToBackend(GetLayout()));
+ ShaderModule::MetalFunctionData fragmentData;
+ DAWN_TRY(fragmentModule->GetFunction(fragmentEntryPoint, SingleShaderStage::Fragment,
+ ToBackend(GetLayout()), &fragmentData));
+
descriptorMTL.fragmentFunction = fragmentData.function;
if (fragmentData.needsStorageBufferLength) {
mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Fragment;
}
if (HasDepthStencilAttachment()) {
- // TODO(kainino@chromium.org): Handle depth-only and stencil-only formats.
wgpu::TextureFormat depthStencilFormat = GetDepthStencilFormat();
- descriptorMTL.depthAttachmentPixelFormat = MetalPixelFormat(depthStencilFormat);
- descriptorMTL.stencilAttachmentPixelFormat = MetalPixelFormat(depthStencilFormat);
+ const Format& internalFormat = GetDevice()->GetValidInternalFormat(depthStencilFormat);
+ MTLPixelFormat metalFormat = MetalPixelFormat(depthStencilFormat);
+
+ if (internalFormat.HasDepth()) {
+ descriptorMTL.depthAttachmentPixelFormat = metalFormat;
+ }
+ if (internalFormat.HasStencil()) {
+ descriptorMTL.stencilAttachmentPixelFormat = metalFormat;
+ }
}
const ShaderModuleBase::FragmentOutputBaseTypes& fragmentOutputBaseTypes =
@@ -372,9 +391,7 @@ namespace dawn_native { namespace metal {
[descriptorMTL release];
if (error != nil) {
NSLog(@" error => %@", error);
- device->HandleError(wgpu::ErrorType::DeviceLost,
- "Error creating rendering pipeline state");
- return;
+ return DAWN_INTERNAL_ERROR("Error creating rendering pipeline state");
}
}
@@ -385,6 +402,8 @@ namespace dawn_native { namespace metal {
MakeDepthStencilDesc(GetDepthStencilStateDescriptor());
mMtlDepthStencilState = [mtlDevice newDepthStencilStateWithDescriptor:depthStencilDesc];
[depthStencilDesc release];
+
+ return {};
}
RenderPipeline::~RenderPipeline() {
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.h
index 776c9356295..205a7ecd2d1 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.h
@@ -25,12 +25,13 @@ namespace dawn_native { namespace metal {
class Sampler : public SamplerBase {
public:
- Sampler(Device* device, const SamplerDescriptor* descriptor);
+ static ResultOrError<Sampler*> Create(Device* device, const SamplerDescriptor* descriptor);
~Sampler();
id<MTLSamplerState> GetMTLSamplerState();
private:
+ Sampler(Device* device, const SamplerDescriptor* descriptor);
id<MTLSamplerState> mMtlSamplerState = nil;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm
index c58e5823af5..e8946867dad 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm
@@ -50,6 +50,16 @@ namespace dawn_native { namespace metal {
}
}
+ // static
+ ResultOrError<Sampler*> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
+ if (descriptor->compare != wgpu::CompareFunction::Never &&
+ device->IsToggleEnabled(Toggle::MetalDisableSamplerCompare)) {
+ return DAWN_VALIDATION_ERROR("Sampler compare function not supported.");
+ }
+
+ return new Sampler(device, descriptor);
+ }
+
Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
: SamplerBase(device, descriptor) {
MTLSamplerDescriptor* mtlDesc = [MTLSamplerDescriptor new];
@@ -64,7 +74,12 @@ namespace dawn_native { namespace metal {
mtlDesc.lodMinClamp = descriptor->lodMinClamp;
mtlDesc.lodMaxClamp = descriptor->lodMaxClamp;
- mtlDesc.compareFunction = ToMetalCompareFunction(descriptor->compare);
+
+ if (descriptor->compare != wgpu::CompareFunction::Never) {
+ // Anything other than Never is unsupported before A9, which we validate in
+ // Sampler::Create.
+ mtlDesc.compareFunction = ToMetalCompareFunction(descriptor->compare);
+ }
mMtlSamplerState = [device->GetMTLDevice() newSamplerStateWithDescriptor:mtlDesc];
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
index e259b691a20..b270034d5d2 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
@@ -36,21 +36,24 @@ namespace dawn_native { namespace metal {
const ShaderModuleDescriptor* descriptor);
struct MetalFunctionData {
- id<MTLFunction> function;
+ id<MTLFunction> function = nil;
MTLSize localWorkgroupSize;
bool needsStorageBufferLength;
~MetalFunctionData() {
[function release];
}
};
- MetalFunctionData GetFunction(const char* functionName,
- SingleShaderStage functionStage,
- const PipelineLayout* layout) const;
+ MaybeError GetFunction(const char* functionName,
+ SingleShaderStage functionStage,
+ const PipelineLayout* layout,
+ MetalFunctionData* out);
private:
ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
MaybeError Initialize(const ShaderModuleDescriptor* descriptor);
+ shaderc_spvc::CompileOptions GetMSLCompileOptions();
+
// Calling compile on CompilerMSL somehow changes internal state that makes subsequent
// compiles return invalid MSL. We keep the spirv around and recreate the compiler everytime
// we need to use it.
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
index c01621dacf5..bb2d4f03eb7 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
@@ -38,6 +38,20 @@ namespace dawn_native { namespace metal {
UNREACHABLE();
}
}
+
+ shaderc_spvc_execution_model ToSpvcExecutionModel(SingleShaderStage stage) {
+ switch (stage) {
+ case SingleShaderStage::Vertex:
+ return shaderc_spvc_execution_model_vertex;
+ case SingleShaderStage::Fragment:
+ return shaderc_spvc_execution_model_fragment;
+ case SingleShaderStage::Compute:
+ return shaderc_spvc_execution_model_glcompute;
+ default:
+ UNREACHABLE();
+ return shaderc_spvc_execution_model_invalid;
+ }
+ }
} // namespace
// static
@@ -57,47 +71,39 @@ namespace dawn_native { namespace metal {
MaybeError ShaderModule::Initialize(const ShaderModuleDescriptor* descriptor) {
mSpirv.assign(descriptor->code, descriptor->code + descriptor->codeSize);
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
- shaderc_spvc::CompileOptions options;
- shaderc_spvc_status status =
- mSpvcContext.InitializeForGlsl(descriptor->code, descriptor->codeSize, options);
- if (status != shaderc_spvc_status_success) {
- return DAWN_VALIDATION_ERROR("Unable to initialize instance of spvc");
- }
+ shaderc_spvc::CompileOptions options = GetMSLCompileOptions();
+
+ DAWN_TRY(CheckSpvcSuccess(
+ mSpvcContext.InitializeForMsl(descriptor->code, descriptor->codeSize, options),
+ "Unable to initialize instance of spvc"));
- spirv_cross::CompilerMSL* compiler =
- reinterpret_cast<spirv_cross::CompilerMSL*>(mSpvcContext.GetCompiler());
- ExtractSpirvInfo(*compiler);
+ spirv_cross::CompilerMSL* compiler;
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.GetCompiler(reinterpret_cast<void**>(&compiler)),
+ "Unable to get cross compiler"));
+ DAWN_TRY(ExtractSpirvInfo(*compiler));
} else {
spirv_cross::CompilerMSL compiler(mSpirv);
- ExtractSpirvInfo(compiler);
+ DAWN_TRY(ExtractSpirvInfo(compiler));
}
return {};
}
- ShaderModule::MetalFunctionData ShaderModule::GetFunction(const char* functionName,
- SingleShaderStage functionStage,
- const PipelineLayout* layout) const {
+ MaybeError ShaderModule::GetFunction(const char* functionName,
+ SingleShaderStage functionStage,
+ const PipelineLayout* layout,
+ ShaderModule::MetalFunctionData* out) {
+ ASSERT(!IsError());
+ ASSERT(out);
std::unique_ptr<spirv_cross::CompilerMSL> compiler_impl;
spirv_cross::CompilerMSL* compiler;
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
- // If these options are changed, the values in DawnSPIRVCrossGLSLFastFuzzer.cpp need to
- // be updated.
- shaderc_spvc::CompileOptions options;
-
- // Disable PointSize builtin for https://bugs.chromium.org/p/dawn/issues/detail?id=146
- // Because Metal will reject PointSize builtin if the shader is compiled into a render
- // pipeline that uses a non-point topology.
- // TODO (hao.x.li@intel.com): Remove this once WebGPU requires there is no
- // gl_PointSize builtin (https://github.com/gpuweb/gpuweb/issues/332).
- options.SetMSLEnablePointSizeBuiltIn(false);
-
- // Always use vertex buffer 30 (the last one in the vertex buffer table) to contain
- // the shader storage buffer lengths.
- options.SetMSLBufferSizeBufferIndex(kBufferLengthBufferSlot);
- mSpvcContext.InitializeForMsl(mSpirv.data(), mSpirv.size(), options);
- // TODO(rharrison): Handle initialize failing
-
- compiler = reinterpret_cast<spirv_cross::CompilerMSL*>(mSpvcContext.GetCompiler());
+ // Initializing the compiler is needed every call, because this method uses reflection
+ // to mutate the compiler's IR.
+ DAWN_TRY(CheckSpvcSuccess(
+ mSpvcContext.InitializeForMsl(mSpirv.data(), mSpirv.size(), GetMSLCompileOptions()),
+ "Unable to initialize instance of spvc"));
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.GetCompiler(reinterpret_cast<void**>(&compiler)),
+ "Unable to get cross compiler"));
} else {
// If these options are changed, the values in DawnSPIRVCrossMSLFastFuzzer.cpp need to
// be updated.
@@ -125,45 +131,86 @@ namespace dawn_native { namespace metal {
// Create one resource binding entry per stage per binding.
for (uint32_t group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- const auto& bgInfo = layout->GetBindGroupLayout(group)->GetBindingInfo();
- for (uint32_t binding : IterateBitSet(bgInfo.mask)) {
- for (auto stage : IterateStages(bgInfo.visibilities[binding])) {
- uint32_t index = layout->GetBindingIndexInfo(stage)[group][binding];
-
- spirv_cross::MSLResourceBinding mslBinding;
- mslBinding.stage = SpirvExecutionModelForStage(stage);
- mslBinding.desc_set = group;
- mslBinding.binding = binding;
- mslBinding.msl_buffer = mslBinding.msl_texture = mslBinding.msl_sampler = index;
-
- compiler->add_msl_resource_binding(mslBinding);
+ const BindGroupLayoutBase::BindingMap& bindingMap =
+ layout->GetBindGroupLayout(group)->GetBindingMap();
+
+ for (const auto& it : bindingMap) {
+ BindingNumber bindingNumber = it.first;
+ BindingIndex bindingIndex = it.second;
+
+ const BindingInfo& bindingInfo =
+ layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
+
+ for (auto stage : IterateStages(bindingInfo.visibility)) {
+ uint32_t shaderIndex = layout->GetBindingIndexInfo(stage)[group][bindingIndex];
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ shaderc_spvc_msl_resource_binding mslBinding;
+ mslBinding.stage = ToSpvcExecutionModel(stage);
+ mslBinding.desc_set = group;
+ mslBinding.binding = bindingNumber;
+ mslBinding.msl_buffer = mslBinding.msl_texture = mslBinding.msl_sampler =
+ shaderIndex;
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.AddMSLResourceBinding(mslBinding),
+ "Unable to add MSL Resource Binding"));
+ } else {
+ spirv_cross::MSLResourceBinding mslBinding;
+ mslBinding.stage = SpirvExecutionModelForStage(stage);
+ mslBinding.desc_set = group;
+ mslBinding.binding = bindingNumber;
+ mslBinding.msl_buffer = mslBinding.msl_texture = mslBinding.msl_sampler =
+ shaderIndex;
+
+ compiler->add_msl_resource_binding(mslBinding);
+ }
}
}
}
- MetalFunctionData result;
-
{
- spv::ExecutionModel executionModel = SpirvExecutionModelForStage(functionStage);
- auto size = compiler->get_entry_point(functionName, executionModel).workgroup_size;
- result.localWorkgroupSize = MTLSizeMake(size.x, size.y, size.z);
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ shaderc_spvc_execution_model executionModel = ToSpvcExecutionModel(functionStage);
+ shaderc_spvc_workgroup_size size;
+ DAWN_TRY(CheckSpvcSuccess(
+ mSpvcContext.GetWorkgroupSize(functionName, executionModel, &size),
+ "Unable to get workgroup size for shader"));
+ out->localWorkgroupSize = MTLSizeMake(size.x, size.y, size.z);
+ } else {
+ spv::ExecutionModel executionModel = SpirvExecutionModelForStage(functionStage);
+ auto size = compiler->get_entry_point(functionName, executionModel).workgroup_size;
+ out->localWorkgroupSize = MTLSizeMake(size.x, size.y, size.z);
+ }
}
{
// SPIRV-Cross also supports re-ordering attributes but it seems to do the correct thing
// by default.
- std::string msl = compiler->compile();
- NSString* mslSource = [NSString stringWithFormat:@"%s", msl.c_str()];
-
+ NSString* mslSource;
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ shaderc_spvc::CompilationResult result;
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.CompileShader(&result),
+ "Unable to compile MSL shader"));
+ std::string result_str;
+ DAWN_TRY(CheckSpvcSuccess(result.GetStringOutput(&result_str),
+ "Unable to get MSL shader text"));
+ mslSource = [NSString stringWithFormat:@"%s", result_str.c_str()];
+ } else {
+ std::string msl = compiler->compile();
+ mslSource = [NSString stringWithFormat:@"%s", msl.c_str()];
+ }
auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
NSError* error = nil;
id<MTLLibrary> library = [mtlDevice newLibraryWithSource:mslSource
options:nil
error:&error];
if (error != nil) {
- // TODO(cwallez@chromium.org): forward errors to caller
+ // TODO(cwallez@chromium.org): Switch that NSLog to use dawn::InfoLog or even be
+ // folded in the DAWN_VALIDATION_ERROR
NSLog(@"MTLDevice newLibraryWithSource => %@", error);
+ if (error.code != MTLLibraryErrorCompileWarning) {
+ return DAWN_VALIDATION_ERROR("Unable to create library object");
+ }
}
+
// TODO(kainino@chromium.org): make this somehow more robust; it needs to behave like
// clean_func_name:
// https://github.com/KhronosGroup/SPIRV-Cross/blob/4e915e8c483e319d0dd7a1fa22318bef28f8cca3/spirv_msl.cpp#L1213
@@ -172,13 +219,38 @@ namespace dawn_native { namespace metal {
}
NSString* name = [NSString stringWithFormat:@"%s", functionName];
- result.function = [library newFunctionWithName:name];
+ out->function = [library newFunctionWithName:name];
[library release];
}
- result.needsStorageBufferLength = compiler->needs_buffer_size_buffer();
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ DAWN_TRY(
+ CheckSpvcSuccess(mSpvcContext.NeedsBufferSizeBuffer(&out->needsStorageBufferLength),
+ "Unable to determine if shader needs buffer size buffer"));
+ } else {
+ out->needsStorageBufferLength = compiler->needs_buffer_size_buffer();
+ }
+
+ return {};
+ }
+
+ shaderc_spvc::CompileOptions ShaderModule::GetMSLCompileOptions() {
+ // If these options are changed, the values in DawnSPIRVCrossGLSLFastFuzzer.cpp need to
+ // be updated.
+ shaderc_spvc::CompileOptions options = GetCompileOptions();
+
+ // Disable PointSize builtin for https://bugs.chromium.org/p/dawn/issues/detail?id=146
+ // Because Metal will reject PointSize builtin if the shader is compiled into a render
+ // pipeline that uses a non-point topology.
+ // TODO (hao.x.li@intel.com): Remove this once WebGPU requires there is no
+ // gl_PointSize builtin (https://github.com/gpuweb/gpuweb/issues/332).
+ options.SetMSLEnablePointSizeBuiltIn(false);
+
+ // Always use vertex buffer 30 (the last one in the vertex buffer table) to contain
+ // the shader storage buffer lengths.
+ options.SetMSLBufferSizeBufferIndex(kBufferLengthBufferSlot);
- return result;
+ return options;
}
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/StagingBufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/StagingBufferMTL.mm
index 491b35cf797..390f00cfcfa 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/StagingBufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/StagingBufferMTL.mm
@@ -32,7 +32,7 @@ namespace dawn_native { namespace metal {
mMappedPointer = [mBuffer contents];
if (mMappedPointer == nullptr) {
- return DAWN_DEVICE_LOST_ERROR("Unable to map staging buffer.");
+ return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
}
return {};
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h
index 5141ea77eeb..a4cddc1f5ae 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.h
@@ -17,20 +17,43 @@
#include "dawn_native/SwapChain.h"
+@class CAMetalLayer;
+@protocol CAMetalDrawable;
+
namespace dawn_native { namespace metal {
class Device;
+ class Texture;
- class SwapChain : public SwapChainBase {
+ class OldSwapChain : public OldSwapChainBase {
public:
- SwapChain(Device* device, const SwapChainDescriptor* descriptor);
- ~SwapChain();
+ OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+ ~OldSwapChain();
protected:
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
MaybeError OnBeforePresent(TextureBase* texture) override;
};
+ class SwapChain : public NewSwapChainBase {
+ public:
+ SwapChain(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor);
+ ~SwapChain() override;
+
+ private:
+ CAMetalLayer* mLayer = nullptr;
+
+ id<CAMetalDrawable> mCurrentDrawable = nil;
+ Ref<Texture> mTexture;
+
+ MaybeError PresentImpl() override;
+ ResultOrError<TextureViewBase*> GetCurrentTextureViewImpl() override;
+ void DetachFromSurfaceImpl() override;
+ };
+
}} // namespace dawn_native::metal
#endif // DAWNNATIVE_METAL_SWAPCHAINMTL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
index 4a35e41dee8..2a5ffb12431 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
@@ -14,15 +14,20 @@
#include "dawn_native/metal/SwapChainMTL.h"
+#include "dawn_native/Surface.h"
#include "dawn_native/metal/DeviceMTL.h"
#include "dawn_native/metal/TextureMTL.h"
#include <dawn/dawn_wsi.h>
+#import <QuartzCore/CAMetalLayer.h>
+
namespace dawn_native { namespace metal {
- SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : SwapChainBase(device, descriptor) {
+ // OldSwapChain
+
+ OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+ : OldSwapChainBase(device, descriptor) {
const auto& im = GetImplementation();
DawnWSIContextMetal wsiContext = {};
wsiContext.device = ToBackend(GetDevice())->GetMTLDevice();
@@ -30,15 +35,15 @@ namespace dawn_native { namespace metal {
im.Init(im.userData, &wsiContext);
}
- SwapChain::~SwapChain() {
+ OldSwapChain::~OldSwapChain() {
}
- TextureBase* SwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+ TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
const auto& im = GetImplementation();
DawnSwapChainNextTexture next = {};
DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
if (error) {
- GetDevice()->HandleError(wgpu::ErrorType::Unknown, error);
+ GetDevice()->HandleError(InternalErrorType::Internal, error);
return nullptr;
}
@@ -46,8 +51,88 @@ namespace dawn_native { namespace metal {
return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture);
}
- MaybeError SwapChain::OnBeforePresent(TextureBase*) {
+ MaybeError OldSwapChain::OnBeforePresent(TextureBase*) {
+ return {};
+ }
+
+ // SwapChain
+
+ SwapChain::SwapChain(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor)
+ : NewSwapChainBase(device, surface, descriptor) {
+ ASSERT(surface->GetType() == Surface::Type::MetalLayer);
+
+ if (previousSwapChain != nullptr) {
+ // TODO(cwallez@chromium.org): figure out what should happen when surfaces are used by
+ // multiple backends one after the other. It probably needs to block until the backend
+ // and GPU are completely finished with the previous swapchain.
+ ASSERT(previousSwapChain->GetBackendType() == wgpu::BackendType::Metal);
+ previousSwapChain->DetachFromSurface();
+ }
+
+ mLayer = static_cast<CAMetalLayer*>(surface->GetMetalLayer());
+ ASSERT(mLayer != nullptr);
+
+ CGSize size = {};
+ size.width = GetWidth();
+ size.height = GetHeight();
+ [mLayer setDrawableSize:size];
+
+ [mLayer setFramebufferOnly:(GetUsage() == wgpu::TextureUsage::OutputAttachment)];
+ [mLayer setDevice:ToBackend(GetDevice())->GetMTLDevice()];
+ [mLayer setPixelFormat:MetalPixelFormat(GetFormat())];
+
+#if defined(DAWN_PLATFORM_MACOS)
+ if (@available(macos 10.13, *)) {
+ [mLayer setDisplaySyncEnabled:(GetPresentMode() != wgpu::PresentMode::Immediate)];
+ }
+#endif // defined(DAWN_PLATFORM_MACOS)
+
+ // There is no way to control Fifo vs. Mailbox in Metal.
+ }
+
+ SwapChain::~SwapChain() {
+ DetachFromSurface();
+ }
+
+ MaybeError SwapChain::PresentImpl() {
+ ASSERT(mCurrentDrawable != nil);
+ [mCurrentDrawable present];
+
+ mTexture->Destroy();
+ mTexture = nullptr;
+
+ [mCurrentDrawable release];
+ mCurrentDrawable = nil;
+
return {};
}
+ ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
+ ASSERT(mCurrentDrawable == nil);
+ mCurrentDrawable = [mLayer nextDrawable];
+ [mCurrentDrawable retain];
+
+ TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
+
+ // mTexture will add a reference to mCurrentDrawable.texture to keep it alive.
+ mTexture =
+ AcquireRef(new Texture(ToBackend(GetDevice()), &textureDesc, mCurrentDrawable.texture));
+ return mTexture->CreateView(nullptr);
+ }
+
+ void SwapChain::DetachFromSurfaceImpl() {
+ ASSERT((mTexture.Get() == nullptr) == (mCurrentDrawable == nil));
+
+ if (mTexture.Get() != nullptr) {
+ mTexture->Destroy();
+ mTexture = nullptr;
+
+ [mCurrentDrawable release];
+ mCurrentDrawable = nil;
+ }
+ }
+
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
index 13ba9867596..0b18089f2ad 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
@@ -19,6 +19,7 @@
#include <IOSurface/IOSurfaceRef.h>
#import <Metal/Metal.h>
+#include "dawn_native/DawnNative.h"
namespace dawn_native { namespace metal {
@@ -35,16 +36,27 @@ namespace dawn_native { namespace metal {
Texture(Device* device, const TextureDescriptor* descriptor);
Texture(Device* device, const TextureDescriptor* descriptor, id<MTLTexture> mtlTexture);
Texture(Device* device,
- const TextureDescriptor* descriptor,
+ const ExternalImageDescriptor* descriptor,
IOSurfaceRef ioSurface,
uint32_t plane);
~Texture();
id<MTLTexture> GetMTLTexture();
+ void EnsureSubresourceContentInitialized(uint32_t baseMipLevel,
+ uint32_t levelCount,
+ uint32_t baseArrayLayer,
+ uint32_t layerCount);
+
private:
void DestroyImpl() override;
+ MaybeError ClearTexture(uint32_t baseMipLevel,
+ uint32_t levelCount,
+ uint32_t baseArrayLayer,
+ uint32_t layerCount,
+ TextureBase::ClearValue clearValue);
+
id<MTLTexture> mMtlTexture = nil;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
index 9d7701b329a..fd18dca30aa 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
@@ -14,8 +14,12 @@
#include "dawn_native/metal/TextureMTL.h"
+#include "common/Constants.h"
+#include "common/Math.h"
#include "common/Platform.h"
+#include "dawn_native/DynamicUploader.h"
#include "dawn_native/metal/DeviceMTL.h"
+#include "dawn_native/metal/StagingBufferMTL.h"
namespace dawn_native { namespace metal {
@@ -318,6 +322,11 @@ namespace dawn_native { namespace metal {
MTLTextureDescriptor* mtlDesc = CreateMetalTextureDescriptor(descriptor);
mMtlTexture = [device->GetMTLDevice() newTextureWithDescriptor:mtlDesc];
[mtlDesc release];
+
+ if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+ device->ConsumedError(ClearTexture(0, GetNumMipLevels(), 0, GetArrayLayers(),
+ TextureBase::ClearValue::NonZero));
+ }
}
Texture::Texture(Device* device, const TextureDescriptor* descriptor, id<MTLTexture> mtlTexture)
@@ -326,16 +335,21 @@ namespace dawn_native { namespace metal {
}
Texture::Texture(Device* device,
- const TextureDescriptor* descriptor,
+ const ExternalImageDescriptor* descriptor,
IOSurfaceRef ioSurface,
uint32_t plane)
- : TextureBase(device, descriptor, TextureState::OwnedInternal) {
- MTLTextureDescriptor* mtlDesc = CreateMetalTextureDescriptor(descriptor);
+ : TextureBase(device,
+ reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor),
+ TextureState::OwnedInternal) {
+ MTLTextureDescriptor* mtlDesc = CreateMetalTextureDescriptor(
+ reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor));
mtlDesc.storageMode = kIOSurfaceStorageMode;
mMtlTexture = [device->GetMTLDevice() newTextureWithDescriptor:mtlDesc
iosurface:ioSurface
plane:plane];
[mtlDesc release];
+
+ SetIsSubresourceContentInitialized(descriptor->isCleared, 0, 1, 0, 1);
}
Texture::~Texture() {
@@ -353,6 +367,198 @@ namespace dawn_native { namespace metal {
return mMtlTexture;
}
+ MaybeError Texture::ClearTexture(uint32_t baseMipLevel,
+ uint32_t levelCount,
+ uint32_t baseArrayLayer,
+ uint32_t layerCount,
+ TextureBase::ClearValue clearValue) {
+ Device* device = ToBackend(GetDevice());
+
+ CommandRecordingContext* commandContext = device->GetPendingCommandContext();
+
+ const uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+ const double dClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.0 : 1.0;
+
+ if ((GetUsage() & wgpu::TextureUsage::OutputAttachment) != 0) {
+ ASSERT(GetFormat().isRenderable);
+
+ // End the blit encoder if it is open.
+ commandContext->EndBlit();
+
+ if (GetFormat().HasDepthOrStencil()) {
+ // Create a render pass to clear each subresource.
+ for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ for (uint32_t arrayLayer = baseArrayLayer;
+ arrayLayer < baseArrayLayer + layerCount; arrayLayer++) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(level, 1, arrayLayer, 1)) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+
+ MTLRenderPassDescriptor* descriptor =
+ [MTLRenderPassDescriptor renderPassDescriptor];
+
+ if (GetFormat().HasDepth()) {
+ descriptor.depthAttachment.texture = GetMTLTexture();
+ descriptor.depthAttachment.loadAction = MTLLoadActionClear;
+ descriptor.depthAttachment.storeAction = MTLStoreActionStore;
+ descriptor.depthAttachment.clearDepth = dClearColor;
+ }
+ if (GetFormat().HasStencil()) {
+ descriptor.stencilAttachment.texture = GetMTLTexture();
+ descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
+ descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
+ descriptor.stencilAttachment.clearStencil =
+ static_cast<uint32_t>(clearColor);
+ }
+
+ commandContext->BeginRender(descriptor);
+ commandContext->EndRender();
+ }
+ }
+ } else {
+ ASSERT(GetFormat().IsColor());
+ MTLRenderPassDescriptor* descriptor = nil;
+ uint32_t attachment = 0;
+
+ // Create multiple render passes with each subresource as a color attachment to
+ // clear them all.
+ for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ for (uint32_t arrayLayer = baseArrayLayer;
+ arrayLayer < baseArrayLayer + layerCount; arrayLayer++) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(level, 1, arrayLayer, 1)) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+
+ if (descriptor == nil) {
+ descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
+ }
+
+ descriptor.colorAttachments[attachment].texture = GetMTLTexture();
+ descriptor.colorAttachments[attachment].loadAction = MTLLoadActionClear;
+ descriptor.colorAttachments[attachment].storeAction = MTLStoreActionStore;
+ descriptor.colorAttachments[attachment].clearColor =
+ MTLClearColorMake(dClearColor, dClearColor, dClearColor, dClearColor);
+ descriptor.colorAttachments[attachment].level = level;
+ descriptor.colorAttachments[attachment].slice = arrayLayer;
+
+ attachment++;
+
+ if (attachment == kMaxColorAttachments) {
+ attachment = 0;
+ commandContext->BeginRender(descriptor);
+ commandContext->EndRender();
+ descriptor = nil;
+ }
+ }
+ }
+
+ if (descriptor != nil) {
+ commandContext->BeginRender(descriptor);
+ commandContext->EndRender();
+ }
+ }
+ } else {
+ // Compute the buffer size big enough to fill the largest mip.
+ Extent3D largestMipSize = GetMipLevelVirtualSize(baseMipLevel);
+
+ // Metal validation layers: sourceBytesPerRow must be at least 64.
+ uint32_t largestMipBytesPerRow = std::max(
+ (largestMipSize.width / GetFormat().blockWidth) * GetFormat().blockByteSize, 64u);
+
+ // Metal validation layers: sourceBytesPerImage must be at least 512.
+ uint64_t largestMipBytesPerImage =
+ std::max(static_cast<uint64_t>(largestMipBytesPerRow) *
+ (largestMipSize.height / GetFormat().blockHeight),
+ 512llu);
+
+ // TODO(enga): Multiply by largestMipSize.depth and do a larger 3D copy to clear a whole
+ // range of subresources when tracking that is improved.
+ uint64_t bufferSize = largestMipBytesPerImage * 1;
+
+ if (bufferSize > std::numeric_limits<NSUInteger>::max()) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
+ }
+
+ DynamicUploader* uploader = device->GetDynamicUploader();
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle,
+ uploader->Allocate(bufferSize, device->GetPendingCommandSerial()));
+ memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
+
+ id<MTLBlitCommandEncoder> encoder = commandContext->EnsureBlit();
+ id<MTLBuffer> uploadBuffer = ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle();
+
+ // Encode a buffer to texture copy to clear each subresource.
+ for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ Extent3D virtualSize = GetMipLevelVirtualSize(level);
+
+ for (uint32_t arrayLayer = baseArrayLayer; arrayLayer < baseArrayLayer + layerCount;
+ ++arrayLayer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(level, 1, arrayLayer, 1)) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+
+ // If the texture’s pixel format is a combined depth/stencil format, then
+ // options must be set to either blit the depth attachment portion or blit the
+ // stencil attachment portion.
+ std::array<MTLBlitOption, 3> blitOptions = {
+ MTLBlitOptionNone, MTLBlitOptionDepthFromDepthStencil,
+ MTLBlitOptionStencilFromDepthStencil};
+
+ auto blitOptionStart = blitOptions.begin();
+ auto blitOptionEnd = blitOptionStart + 1;
+ if (GetFormat().format == wgpu::TextureFormat::Depth24PlusStencil8) {
+ blitOptionStart = blitOptions.begin() + 1;
+ blitOptionEnd = blitOptionStart + 2;
+ }
+
+ for (auto it = blitOptionStart; it != blitOptionEnd; ++it) {
+ [encoder copyFromBuffer:uploadBuffer
+ sourceOffset:uploadHandle.startOffset
+ sourceBytesPerRow:largestMipBytesPerRow
+ sourceBytesPerImage:largestMipBytesPerImage
+ sourceSize:MTLSizeMake(virtualSize.width, virtualSize.height,
+ 1)
+ toTexture:GetMTLTexture()
+ destinationSlice:arrayLayer
+ destinationLevel:level
+ destinationOrigin:MTLOriginMake(0, 0, 0)
+ options:(*it)];
+ }
+ }
+ }
+ }
+
+ if (clearValue == TextureBase::ClearValue::Zero) {
+ SetIsSubresourceContentInitialized(true, baseMipLevel, levelCount, baseArrayLayer,
+ layerCount);
+ device->IncrementLazyClearCountForTesting();
+ }
+ return {};
+ }
+
+ void Texture::EnsureSubresourceContentInitialized(uint32_t baseMipLevel,
+ uint32_t levelCount,
+ uint32_t baseArrayLayer,
+ uint32_t layerCount) {
+ if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+ return;
+ }
+ if (!IsSubresourceContentInitialized(baseMipLevel, levelCount, baseArrayLayer,
+ layerCount)) {
+ // If subresource has not been initialized, clear it to black as it could
+ // contain dirty bits from recycled memory
+ GetDevice()->ConsumedError(ClearTexture(baseMipLevel, levelCount, baseArrayLayer,
+ layerCount, TextureBase::ClearValue::Zero));
+ }
+ }
+
TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
: TextureViewBase(texture, descriptor) {
id<MTLTexture> mtlTexture = ToBackend(texture)->GetMTLTexture();
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
index 872d48674cc..dbb40ec2266 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
@@ -17,7 +17,9 @@
#include "dawn_native/BackendConnection.h"
#include "dawn_native/Commands.h"
#include "dawn_native/DynamicUploader.h"
+#include "dawn_native/ErrorData.h"
#include "dawn_native/Instance.h"
+#include "dawn_native/Surface.h"
#include <spirv_cross.hpp>
@@ -25,9 +27,9 @@ namespace dawn_native { namespace null {
// Implementation of pre-Device objects: the null adapter, null backend connection and Connect()
- Adapter::Adapter(InstanceBase* instance) : AdapterBase(instance, BackendType::Null) {
+ Adapter::Adapter(InstanceBase* instance) : AdapterBase(instance, wgpu::BackendType::Null) {
mPCIInfo.name = "Null backend";
- mDeviceType = DeviceType::CPU;
+ mAdapterType = wgpu::AdapterType::CPU;
// Enable all extensions by default for the convenience of tests.
mSupportedExtensions.extensionsBitSet.flip();
@@ -46,7 +48,7 @@ namespace dawn_native { namespace null {
class Backend : public BackendConnection {
public:
- Backend(InstanceBase* instance) : BackendConnection(instance, BackendType::Null) {
+ Backend(InstanceBase* instance) : BackendConnection(instance, wgpu::BackendType::Null) {
}
std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() override {
@@ -85,9 +87,9 @@ namespace dawn_native { namespace null {
}
Device::~Device() {
- mDynamicUploader = nullptr;
-
- mPendingOperations.clear();
+ BaseDestructor();
+ // This assert is in the destructor rather than Device::Destroy() because it needs to make
+ // sure buffers have been destroyed before the device.
ASSERT(mMemoryUsage == 0);
}
@@ -131,25 +133,35 @@ namespace dawn_native { namespace null {
if (IsToggleEnabled(Toggle::UseSpvc)) {
shaderc_spvc::CompileOptions options;
- shaderc_spvc::Context context;
+ options.SetValidate(IsValidationEnabled());
+ shaderc_spvc::Context* context = module->GetContext();
shaderc_spvc_status status =
- context.InitializeForGlsl(descriptor->code, descriptor->codeSize, options);
+ context->InitializeForGlsl(descriptor->code, descriptor->codeSize, options);
if (status != shaderc_spvc_status_success) {
return DAWN_VALIDATION_ERROR("Unable to initialize instance of spvc");
}
- spirv_cross::Compiler* compiler =
- reinterpret_cast<spirv_cross::Compiler*>(context.GetCompiler());
- module->ExtractSpirvInfo(*compiler);
+ spirv_cross::Compiler* compiler;
+ status = context->GetCompiler(reinterpret_cast<void**>(&compiler));
+ if (status != shaderc_spvc_status_success) {
+ return DAWN_VALIDATION_ERROR("Unable to get cross compiler");
+ }
+ DAWN_TRY(module->ExtractSpirvInfo(*compiler));
} else {
spirv_cross::Compiler compiler(descriptor->code, descriptor->codeSize);
- module->ExtractSpirvInfo(compiler);
+ DAWN_TRY(module->ExtractSpirvInfo(compiler));
}
return module;
}
ResultOrError<SwapChainBase*> Device::CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) {
- return new SwapChain(this, descriptor);
+ return new OldSwapChain(this, descriptor);
+ }
+ ResultOrError<NewSwapChainBase*> Device::CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ return new SwapChain(this, surface, previousSwapChain, descriptor);
}
ResultOrError<TextureBase*> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
return new Texture(this, descriptor, TextureBase::TextureState::OwnedInternal);
@@ -167,6 +179,18 @@ namespace dawn_native { namespace null {
return std::move(stagingBuffer);
}
+ void Device::Destroy() {
+ ASSERT(mLossStatus != LossStatus::AlreadyLost);
+
+ mDynamicUploader = nullptr;
+
+ mPendingOperations.clear();
+ }
+
+ MaybeError Device::WaitForIdleForDestruction() {
+ return {};
+ }
+
MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
uint64_t sourceOffset,
BufferBase* destination,
@@ -187,7 +211,7 @@ namespace dawn_native { namespace null {
MaybeError Device::IncrementMemoryUsage(size_t bytes) {
static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max() / 2, "");
if (bytes > kMaxMemoryUsage || mMemoryUsage + bytes > kMaxMemoryUsage) {
- return DAWN_DEVICE_LOST_ERROR("Out of memory.");
+ return DAWN_OUT_OF_MEMORY_ERROR("Out of memory.");
}
mMemoryUsage += bytes;
return {};
@@ -228,6 +252,25 @@ namespace dawn_native { namespace null {
mLastSubmittedSerial++;
}
+ // BindGroupDataHolder
+
+ BindGroupDataHolder::BindGroupDataHolder(size_t size)
+ : mBindingDataAllocation(malloc(size)) // malloc is guaranteed to return a
+ // pointer aligned enough for the allocation
+ {
+ }
+
+ BindGroupDataHolder::~BindGroupDataHolder() {
+ free(mBindingDataAllocation);
+ }
+
+ // BindGroup
+
+ BindGroup::BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor)
+ : BindGroupDataHolder(descriptor->layout->GetBindingDataSize()),
+ BindGroupBase(device, descriptor, mBindingDataAllocation) {
+ }
+
// Buffer
struct BufferMapOperation : PendingOperation {
@@ -338,20 +381,60 @@ namespace dawn_native { namespace null {
// SwapChain
- SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : SwapChainBase(device, descriptor) {
+ SwapChain::SwapChain(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor)
+ : NewSwapChainBase(device, surface, descriptor) {
+ if (previousSwapChain != nullptr) {
+ // TODO(cwallez@chromium.org): figure out what should happen when surfaces are used by
+ // multiple backends one after the other. It probably needs to block until the backend
+ // and GPU are completely finished with the previous swapchain.
+ ASSERT(previousSwapChain->GetBackendType() == wgpu::BackendType::Null);
+ previousSwapChain->DetachFromSurface();
+ }
+ }
+
+ SwapChain::~SwapChain() {
+ DetachFromSurface();
+ }
+
+ MaybeError SwapChain::PresentImpl() {
+ mTexture->Destroy();
+ mTexture = nullptr;
+ return {};
+ }
+
+ ResultOrError<TextureViewBase*> SwapChain::GetCurrentTextureViewImpl() {
+ TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
+ mTexture = AcquireRef(
+ new Texture(GetDevice(), &textureDesc, TextureBase::TextureState::OwnedInternal));
+ return mTexture->CreateView(nullptr);
+ }
+
+ void SwapChain::DetachFromSurfaceImpl() {
+ if (mTexture.Get() != nullptr) {
+ mTexture->Destroy();
+ mTexture = nullptr;
+ }
+ }
+
+ // OldSwapChain
+
+ OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+ : OldSwapChainBase(device, descriptor) {
const auto& im = GetImplementation();
im.Init(im.userData, nullptr);
}
- SwapChain::~SwapChain() {
+ OldSwapChain::~OldSwapChain() {
}
- TextureBase* SwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+ TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
return GetDevice()->CreateTexture(descriptor);
}
- MaybeError SwapChain::OnBeforePresent(TextureBase*) {
+ MaybeError OldSwapChain::OnBeforePresent(TextureBase*) {
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
index 82b37bc798b..b41061e743d 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
+++ b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
@@ -38,7 +38,7 @@
namespace dawn_native { namespace null {
class Adapter;
- using BindGroup = BindGroupBase;
+ class BindGroup;
using BindGroupLayout = BindGroupLayoutBase;
class Buffer;
class CommandBuffer;
@@ -125,11 +125,18 @@ namespace dawn_native { namespace null {
const ShaderModuleDescriptor* descriptor) override;
ResultOrError<SwapChainBase*> CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) override;
+ ResultOrError<NewSwapChainBase*> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) override;
ResultOrError<TextureBase*> CreateTextureImpl(const TextureDescriptor* descriptor) override;
ResultOrError<TextureViewBase*> CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) override;
+ void Destroy() override;
+ MaybeError WaitForIdleForDestruction() override;
+
Serial mCompletedSerial = 0;
Serial mLastSubmittedSerial = 0;
std::vector<std::unique_ptr<PendingOperation>> mPendingOperations;
@@ -150,6 +157,24 @@ namespace dawn_native { namespace null {
ResultOrError<DeviceBase*> CreateDeviceImpl(const DeviceDescriptor* descriptor) override;
};
+ // Helper class so |BindGroup| can allocate memory for its binding data,
+ // before calling the BindGroupBase base class constructor.
+ class BindGroupDataHolder {
+ protected:
+ explicit BindGroupDataHolder(size_t size);
+ ~BindGroupDataHolder();
+
+ void* mBindingDataAllocation;
+ };
+
+ // We don't have the complexity of placement-allocation of bind group data in
+ // the Null backend. This class, keeps the binding data in a separate allocation for simplicity.
+ class BindGroup : private BindGroupDataHolder, public BindGroupBase {
+ public:
+ BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor);
+ ~BindGroup() override = default;
+ };
+
class Buffer : public BufferBase {
public:
Buffer(Device* device, const BufferDescriptor* descriptor);
@@ -194,10 +219,26 @@ namespace dawn_native { namespace null {
MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
};
- class SwapChain : public SwapChainBase {
+ class SwapChain : public NewSwapChainBase {
+ public:
+ SwapChain(Device* device,
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor);
+ ~SwapChain() override;
+
+ private:
+ Ref<Texture> mTexture;
+
+ MaybeError PresentImpl() override;
+ ResultOrError<TextureViewBase*> GetCurrentTextureViewImpl() override;
+ void DetachFromSurfaceImpl() override;
+ };
+
+ class OldSwapChain : public OldSwapChainBase {
public:
- SwapChain(Device* device, const SwapChainDescriptor* descriptor);
- ~SwapChain();
+ OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+ ~OldSwapChain();
protected:
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp
index 65d086a8455..7116d8e773d 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp
@@ -14,13 +14,13 @@
#include "dawn_native/opengl/BackendGL.h"
-#include "common/Constants.h"
+#include "common/GPUInfo.h"
+#include "common/Log.h"
#include "dawn_native/Instance.h"
#include "dawn_native/OpenGLBackend.h"
#include "dawn_native/opengl/DeviceGL.h"
#include <cstring>
-#include <iostream>
namespace dawn_native { namespace opengl {
@@ -31,12 +31,12 @@ namespace dawn_native { namespace opengl {
uint32_t vendorId;
};
- const Vendor kVendors[] = {{"ATI", kVendorID_AMD},
- {"ARM", kVendorID_ARM},
- {"Imagination", kVendorID_ImgTec},
- {"Intel", kVendorID_Intel},
- {"NVIDIA", kVendorID_Nvidia},
- {"Qualcomm", kVendorID_Qualcomm}};
+ const Vendor kVendors[] = {{"ATI", gpu_info::kVendorID_AMD},
+ {"ARM", gpu_info::kVendorID_ARM},
+ {"Imagination", gpu_info::kVendorID_ImgTec},
+ {"Intel", gpu_info::kVendorID_Intel},
+ {"NVIDIA", gpu_info::kVendorID_Nvidia},
+ {"Qualcomm", gpu_info::kVendorID_Qualcomm}};
uint32_t GetVendorIdFromVendors(const char* vendor) {
uint32_t vendorId = 0;
@@ -102,11 +102,11 @@ namespace dawn_native { namespace opengl {
}
if (type == GL_DEBUG_TYPE_ERROR) {
- std::cout << "OpenGL error:" << std::endl;
- std::cout << " Source: " << sourceText << std::endl;
- std::cout << " ID: " << id << std::endl;
- std::cout << " Severity: " << severityText << std::endl;
- std::cout << " Message: " << message << std::endl;
+ dawn::WarningLog() << "OpenGL error:"
+ << "\n Source: " << sourceText //
+ << "\n ID: " << id //
+ << "\n Severity: " << severityText //
+ << "\n Message: " << message;
// Abort on an error when in Debug mode.
UNREACHABLE();
@@ -119,7 +119,7 @@ namespace dawn_native { namespace opengl {
class Adapter : public AdapterBase {
public:
- Adapter(InstanceBase* instance) : AdapterBase(instance, BackendType::OpenGL) {
+ Adapter(InstanceBase* instance) : AdapterBase(instance, wgpu::BackendType::OpenGL) {
}
MaybeError Initialize(const AdapterDiscoveryOptions* options) {
@@ -225,7 +225,8 @@ namespace dawn_native { namespace opengl {
// Implementation of the OpenGL backend's BackendConnection
- Backend::Backend(InstanceBase* instance) : BackendConnection(instance, BackendType::OpenGL) {
+ Backend::Backend(InstanceBase* instance)
+ : BackendConnection(instance, wgpu::BackendType::OpenGL) {
}
std::vector<std::unique_ptr<AdapterBase>> Backend::DiscoverDefaultAdapters() {
@@ -241,7 +242,7 @@ namespace dawn_native { namespace opengl {
return DAWN_VALIDATION_ERROR("The OpenGL backend can only create a single adapter");
}
- ASSERT(optionsBase->backendType == BackendType::OpenGL);
+ ASSERT(optionsBase->backendType == WGPUBackendType_OpenGL);
const AdapterDiscoveryOptions* options =
static_cast<const AdapterDiscoveryOptions*>(optionsBase);
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp
new file mode 100644
index 00000000000..383607bff12
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.cpp
@@ -0,0 +1,35 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/opengl/BindGroupGL.h"
+
+#include "dawn_native/opengl/BindGroupLayoutGL.h"
+#include "dawn_native/opengl/DeviceGL.h"
+
+namespace dawn_native { namespace opengl {
+
+ BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
+ : BindGroupBase(this, device, descriptor) {
+ }
+
+ BindGroup::~BindGroup() {
+ ToBackend(GetLayout())->DeallocateBindGroup(this);
+ }
+
+ // static
+ BindGroup* BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
+ return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+ }
+
+}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h
new file mode 100644
index 00000000000..9ce8ed7915b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupGL.h
@@ -0,0 +1,36 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_BINDGROUPGL_H_
+#define DAWNNATIVE_OPENGL_BINDGROUPGL_H_
+
+#include "common/PlacementAllocated.h"
+#include "dawn_native/BindGroup.h"
+
+namespace dawn_native { namespace opengl {
+
+ class BindGroupLayout;
+ class Device;
+
+ class BindGroup : public BindGroupBase, public PlacementAllocated {
+ public:
+ BindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ ~BindGroup() override;
+
+ static BindGroup* Create(Device* device, const BindGroupDescriptor* descriptor);
+ };
+
+}} // namespace dawn_native::opengl
+
+#endif // DAWNNATIVE_OPENGL_BINDGROUPGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp
new file mode 100644
index 00000000000..7c098c8700b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.cpp
@@ -0,0 +1,36 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/opengl/BindGroupLayoutGL.h"
+
+#include "dawn_native/opengl/BindGroupGL.h"
+
+namespace dawn_native { namespace opengl {
+
+ BindGroupLayout::BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor)
+ : BindGroupLayoutBase(device, descriptor),
+ mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
+ }
+
+ BindGroup* BindGroupLayout::AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor) {
+ return mBindGroupAllocator.Allocate(device, descriptor);
+ }
+
+ void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
+ mBindGroupAllocator.Deallocate(bindGroup);
+ }
+
+}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h
new file mode 100644
index 00000000000..ab8dab47301
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BindGroupLayoutGL.h
@@ -0,0 +1,39 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_OPENGL_BINDGROUPLAYOUTGL_H_
+#define DAWNNATIVE_OPENGL_BINDGROUPLAYOUTGL_H_
+
+#include "common/SlabAllocator.h"
+#include "dawn_native/BindGroupLayout.h"
+
+namespace dawn_native { namespace opengl {
+
+ class BindGroup;
+ class Device;
+
+ class BindGroupLayout : public BindGroupLayoutBase {
+ public:
+ BindGroupLayout(DeviceBase* device, const BindGroupLayoutDescriptor* descriptor);
+
+ BindGroup* AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
+ void DeallocateBindGroup(BindGroup* bindGroup);
+
+ private:
+ SlabAllocator<BindGroup> mBindGroupAllocator;
+ };
+
+}} // namespace dawn_native::opengl
+
+#endif // DAWNNATIVE_OPENGL_BINDGROUPLAYOUTGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
index 3993d5f1071..19e674f4a69 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
@@ -239,25 +239,46 @@ namespace dawn_native { namespace opengl {
uint32_t dynamicOffsetCount,
uint64_t* dynamicOffsets) {
const auto& indices = ToBackend(mPipelineLayout)->GetBindingIndexInfo()[index];
- const auto& layout = group->GetLayout()->GetBindingInfo();
- uint32_t currentDynamicIndex = 0;
+ uint32_t currentDynamicOffsetIndex = 0;
- for (uint32_t bindingIndex : IterateBitSet(layout.mask)) {
- switch (layout.types[bindingIndex]) {
+ for (BindingIndex bindingIndex = 0;
+ bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo =
+ group->GetLayout()->GetBindingInfo(bindingIndex);
+
+ switch (bindingInfo.type) {
case wgpu::BindingType::UniformBuffer: {
BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
GLuint buffer = ToBackend(binding.buffer)->GetHandle();
GLuint uboIndex = indices[bindingIndex];
GLuint offset = binding.offset;
- if (layout.hasDynamicOffset[bindingIndex]) {
- offset += dynamicOffsets[currentDynamicIndex];
- ++currentDynamicIndex;
+ if (bindingInfo.hasDynamicOffset) {
+ offset += dynamicOffsets[currentDynamicOffsetIndex];
+ ++currentDynamicOffsetIndex;
}
gl.BindBufferRange(GL_UNIFORM_BUFFER, uboIndex, buffer, offset,
binding.size);
- } break;
+ break;
+ }
+
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer: {
+ BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
+ GLuint buffer = ToBackend(binding.buffer)->GetHandle();
+ GLuint ssboIndex = indices[bindingIndex];
+ GLuint offset = binding.offset;
+
+ if (bindingInfo.hasDynamicOffset) {
+ offset += dynamicOffsets[currentDynamicOffsetIndex];
+ ++currentDynamicOffsetIndex;
+ }
+
+ gl.BindBufferRange(GL_SHADER_STORAGE_BUFFER, ssboIndex, buffer, offset,
+ binding.size);
+ break;
+ }
case wgpu::BindingType::Sampler: {
Sampler* sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
@@ -273,7 +294,8 @@ namespace dawn_native { namespace opengl {
gl.BindSampler(unit.unit, sampler->GetNonFilteringHandle());
}
}
- } break;
+ break;
+ }
case wgpu::BindingType::SampledTexture: {
TextureView* view =
@@ -286,25 +308,12 @@ namespace dawn_native { namespace opengl {
gl.ActiveTexture(GL_TEXTURE0 + unit);
gl.BindTexture(target, handle);
}
- } break;
-
- case wgpu::BindingType::StorageBuffer: {
- BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
- GLuint buffer = ToBackend(binding.buffer)->GetHandle();
- GLuint ssboIndex = indices[bindingIndex];
- GLuint offset = binding.offset;
-
- if (layout.hasDynamicOffset[bindingIndex]) {
- offset += dynamicOffsets[currentDynamicIndex];
- ++currentDynamicIndex;
- }
-
- gl.BindBufferRange(GL_SHADER_STORAGE_BUFFER, ssboIndex, buffer, offset,
- binding.size);
- } break;
+ break;
+ }
case wgpu::BindingType::StorageTexture:
- case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
UNREACHABLE();
break;
@@ -408,12 +417,13 @@ namespace dawn_native { namespace opengl {
auto TransitionForPass = [](const PassResourceUsage& usages) {
for (size_t i = 0; i < usages.textures.size(); i++) {
Texture* texture = ToBackend(usages.textures[i]);
- // We count the lazy clears for non output attachment textures in order to match the
- // backdoor lazy clear counts in Vulkan and D3D12.
- bool isLazyClear =
- !(usages.textureUsages[i] & wgpu::TextureUsage::OutputAttachment);
- texture->EnsureSubresourceContentInitialized(
- 0, texture->GetNumMipLevels(), 0, texture->GetArrayLayers(), isLazyClear);
+ // Clear textures that are not output attachments. Output attachments will be
+ // cleared in BeginRenderPass by setting the loadop to clear when the
+ // texture subresource has not been initialized before the render pass.
+ if (!(usages.textureUsages[i] & wgpu::TextureUsage::OutputAttachment)) {
+ texture->EnsureSubresourceContentInitialized(0, texture->GetNumMipLevels(), 0,
+ texture->GetArrayLayers());
+ }
}
};
@@ -429,15 +439,19 @@ namespace dawn_native { namespace opengl {
ExecuteComputePass();
nextPassNumber++;
- } break;
+ break;
+ }
case Command::BeginRenderPass: {
auto* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
TransitionForPass(passResourceUsages[nextPassNumber]);
+
+ LazyClearRenderPassAttachments(cmd);
ExecuteRenderPass(cmd);
nextPassNumber++;
- } break;
+ break;
+ }
case Command::CopyBufferToBuffer: {
CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
@@ -450,7 +464,8 @@ namespace dawn_native { namespace opengl {
gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
- } break;
+ break;
+ }
case Command::CopyBufferToTexture: {
CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
@@ -529,7 +544,8 @@ namespace dawn_native { namespace opengl {
gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
- } break;
+ break;
+ }
case Command::CopyTextureToBuffer: {
CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
@@ -586,7 +602,8 @@ namespace dawn_native { namespace opengl {
gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
gl.DeleteFramebuffers(1, &readFBO);
- } break;
+ break;
+ }
case Command::CopyTextureToTexture: {
CopyTextureToTextureCmd* copy =
@@ -615,9 +632,13 @@ namespace dawn_native { namespace opengl {
dstTexture->GetHandle(), dstTexture->GetGLTarget(),
dst.mipLevel, dst.origin.x, dst.origin.y, dst.arrayLayer,
copySize.width, copySize.height, 1);
- } break;
+ break;
+ }
- default: { UNREACHABLE(); } break;
+ default: {
+ UNREACHABLE();
+ break;
+ }
}
}
}
@@ -633,7 +654,7 @@ namespace dawn_native { namespace opengl {
case Command::EndComputePass: {
mCommands.NextCommand<EndComputePassCmd>();
return;
- } break;
+ }
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
@@ -642,7 +663,8 @@ namespace dawn_native { namespace opengl {
gl.DispatchCompute(dispatch->x, dispatch->y, dispatch->z);
// TODO(cwallez@chromium.org): add barriers to the API
gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
- } break;
+ break;
+ }
case Command::DispatchIndirect: {
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
@@ -655,7 +677,8 @@ namespace dawn_native { namespace opengl {
gl.DispatchComputeIndirect(static_cast<GLintptr>(indirectBufferOffset));
// TODO(cwallez@chromium.org): add barriers to the API
gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
- } break;
+ break;
+ }
case Command::SetComputePipeline: {
SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
@@ -663,7 +686,8 @@ namespace dawn_native { namespace opengl {
lastPipeline->ApplyNow();
bindGroupTracker.OnSetPipeline(lastPipeline);
- } break;
+ break;
+ }
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
@@ -673,7 +697,8 @@ namespace dawn_native { namespace opengl {
}
bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
cmd->dynamicOffsetCount, dynamicOffsets);
- } break;
+ break;
+ }
case Command::InsertDebugMarker:
case Command::PopDebugGroup:
@@ -681,9 +706,13 @@ namespace dawn_native { namespace opengl {
// Due to lack of linux driver support for GL_EXT_debug_marker
// extension these functions are skipped.
SkipCommand(&mCommands, type);
- } break;
+ break;
+ }
- default: { UNREACHABLE(); } break;
+ default: {
+ UNREACHABLE();
+ break;
+ }
}
}
@@ -779,7 +808,6 @@ namespace dawn_native { namespace opengl {
for (uint32_t i :
IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
auto* attachmentInfo = &renderPass->colorAttachments[i];
- TextureView* view = ToBackend(attachmentInfo->view.Get());
// Load op - color
// TODO(cwallez@chromium.org): Choose the clear function depending on the
@@ -791,30 +819,14 @@ namespace dawn_native { namespace opengl {
gl.ClearBufferfv(GL_COLOR, i, &attachmentInfo->clearColor.r);
}
- switch (attachmentInfo->storeOp) {
- case wgpu::StoreOp::Store: {
- view->GetTexture()->SetIsSubresourceContentInitialized(
- true, view->GetBaseMipLevel(), view->GetLevelCount(),
- view->GetBaseArrayLayer(), view->GetLayerCount());
- } break;
-
- case wgpu::StoreOp::Clear: {
- // TODO(natlee@microsoft.com): call glDiscard to do optimization
- view->GetTexture()->SetIsSubresourceContentInitialized(
- false, view->GetBaseMipLevel(), view->GetLevelCount(),
- view->GetBaseArrayLayer(), view->GetLayerCount());
- } break;
-
- default:
- UNREACHABLE();
- break;
+ if (attachmentInfo->storeOp == wgpu::StoreOp::Clear) {
+ // TODO(natlee@microsoft.com): call glDiscard to do optimization
}
}
if (renderPass->attachmentState->HasDepthStencilAttachment()) {
auto* attachmentInfo = &renderPass->depthStencilAttachment;
const Format& attachmentFormat = attachmentInfo->view->GetTexture()->GetFormat();
- TextureView* view = ToBackend(attachmentInfo->view.Get());
// Load op - depth/stencil
bool doDepthClear = attachmentFormat.HasDepth() &&
@@ -838,18 +850,6 @@ namespace dawn_native { namespace opengl {
const GLint clearStencil = attachmentInfo->clearStencil;
gl.ClearBufferiv(GL_STENCIL, 0, &clearStencil);
}
-
- if (attachmentInfo->depthStoreOp == wgpu::StoreOp::Store &&
- attachmentInfo->stencilStoreOp == wgpu::StoreOp::Store) {
- view->GetTexture()->SetIsSubresourceContentInitialized(
- true, view->GetBaseMipLevel(), view->GetLevelCount(),
- view->GetBaseArrayLayer(), view->GetLayerCount());
- } else if (attachmentInfo->depthStoreOp == wgpu::StoreOp::Clear &&
- attachmentInfo->stencilStoreOp == wgpu::StoreOp::Clear) {
- view->GetTexture()->SetIsSubresourceContentInitialized(
- false, view->GetBaseMipLevel(), view->GetLevelCount(),
- view->GetBaseArrayLayer(), view->GetLayerCount());
- }
}
}
@@ -876,7 +876,8 @@ namespace dawn_native { namespace opengl {
draw->firstVertex, draw->vertexCount,
draw->instanceCount);
}
- } break;
+ break;
+ }
case Command::DrawIndexed: {
DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
@@ -895,14 +896,26 @@ namespace dawn_native { namespace opengl {
indexBufferBaseOffset),
draw->instanceCount, draw->baseVertex, draw->firstInstance);
} else {
- // This branch is only needed on OpenGL < 4.2
- gl.DrawElementsInstancedBaseVertex(
- lastPipeline->GetGLPrimitiveTopology(), draw->indexCount, formatType,
- reinterpret_cast<void*>(draw->firstIndex * formatSize +
- indexBufferBaseOffset),
- draw->instanceCount, draw->baseVertex);
+ // This branch is only needed on OpenGL < 4.2; ES < 3.2
+ if (draw->baseVertex != 0) {
+ gl.DrawElementsInstancedBaseVertex(
+ lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
+ formatType,
+ reinterpret_cast<void*>(draw->firstIndex * formatSize +
+ indexBufferBaseOffset),
+ draw->instanceCount, draw->baseVertex);
+ } else {
+ // This branch is only needed on OpenGL < 3.2; ES < 3.2
+ gl.DrawElementsInstanced(
+ lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
+ formatType,
+ reinterpret_cast<void*>(draw->firstIndex * formatSize +
+ indexBufferBaseOffset),
+ draw->instanceCount);
+ }
}
- } break;
+ break;
+ }
case Command::DrawIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
@@ -916,7 +929,8 @@ namespace dawn_native { namespace opengl {
gl.DrawArraysIndirect(
lastPipeline->GetGLPrimitiveTopology(),
reinterpret_cast<void*>(static_cast<intptr_t>(indirectBufferOffset)));
- } break;
+ break;
+ }
case Command::DrawIndexedIndirect: {
DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
@@ -934,7 +948,8 @@ namespace dawn_native { namespace opengl {
gl.DrawElementsIndirect(
lastPipeline->GetGLPrimitiveTopology(), formatType,
reinterpret_cast<void*>(static_cast<intptr_t>(indirectBufferOffset)));
- } break;
+ break;
+ }
case Command::InsertDebugMarker:
case Command::PopDebugGroup:
@@ -942,7 +957,8 @@ namespace dawn_native { namespace opengl {
// Due to lack of linux driver support for GL_EXT_debug_marker
// extension these functions are skipped.
SkipCommand(iter, type);
- } break;
+ break;
+ }
case Command::SetRenderPipeline: {
SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
@@ -951,7 +967,8 @@ namespace dawn_native { namespace opengl {
vertexStateBufferBindingTracker.OnSetPipeline(lastPipeline);
bindGroupTracker.OnSetPipeline(lastPipeline);
- } break;
+ break;
+ }
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
@@ -961,19 +978,22 @@ namespace dawn_native { namespace opengl {
}
bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
cmd->dynamicOffsetCount, dynamicOffsets);
- } break;
+ break;
+ }
case Command::SetIndexBuffer: {
SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
indexBufferBaseOffset = cmd->offset;
vertexStateBufferBindingTracker.OnSetIndexBuffer(cmd->buffer.Get());
- } break;
+ break;
+ }
case Command::SetVertexBuffer: {
SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
vertexStateBufferBindingTracker.OnSetVertexBuffer(cmd->slot, cmd->buffer.Get(),
cmd->offset);
- } break;
+ break;
+ }
default:
UNREACHABLE();
@@ -992,28 +1012,32 @@ namespace dawn_native { namespace opengl {
}
gl.DeleteFramebuffers(1, &fbo);
return;
- } break;
+ }
case Command::SetStencilReference: {
SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
persistentPipelineState.SetStencilReference(gl, cmd->reference);
- } break;
+ break;
+ }
case Command::SetViewport: {
SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
gl.ViewportIndexedf(0, cmd->x, cmd->y, cmd->width, cmd->height);
gl.DepthRangef(cmd->minDepth, cmd->maxDepth);
- } break;
+ break;
+ }
case Command::SetScissorRect: {
SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
gl.Scissor(cmd->x, cmd->y, cmd->width, cmd->height);
- } break;
+ break;
+ }
case Command::SetBlendColor: {
SetBlendColorCmd* cmd = mCommands.NextCommand<SetBlendColorCmd>();
gl.BlendColor(cmd->color.r, cmd->color.g, cmd->color.b, cmd->color.a);
- } break;
+ break;
+ }
case Command::ExecuteBundles: {
ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
@@ -1026,9 +1050,13 @@ namespace dawn_native { namespace opengl {
DoRenderBundleCommand(iter, type);
}
}
- } break;
+ break;
+ }
- default: { DoRenderBundleCommand(&mCommands, type); } break;
+ default: {
+ DoRenderBundleCommand(&mCommands, type);
+ break;
+ }
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
index 9c252ef6b6f..7cf41a0bbda 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
@@ -15,9 +15,11 @@
#include "dawn_native/opengl/DeviceGL.h"
#include "dawn_native/BackendConnection.h"
-#include "dawn_native/BindGroup.h"
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/DynamicUploader.h"
+#include "dawn_native/ErrorData.h"
+#include "dawn_native/opengl/BindGroupGL.h"
+#include "dawn_native/opengl/BindGroupLayoutGL.h"
#include "dawn_native/opengl/BufferGL.h"
#include "dawn_native/opengl/CommandBufferGL.h"
#include "dawn_native/opengl/ComputePipelineGL.h"
@@ -35,6 +37,7 @@ namespace dawn_native { namespace opengl {
const DeviceDescriptor* descriptor,
const OpenGLFunctions& functions)
: DeviceBase(adapter, descriptor), gl(functions) {
+ InitTogglesFromDriver();
if (descriptor != nullptr) {
ApplyToggleOverrides(descriptor);
}
@@ -42,17 +45,31 @@ namespace dawn_native { namespace opengl {
}
Device::~Device() {
- CheckPassedFences();
- ASSERT(mFencesInFlight.empty());
+ BaseDestructor();
+ }
- // Some operations might have been started since the last submit and waiting
- // on a serial that doesn't have a corresponding fence enqueued. Force all
- // operations to look as if they were completed (because they were).
- mCompletedSerial = mLastSubmittedSerial + 1;
+ void Device::InitTogglesFromDriver() {
+ bool supportsBaseVertex = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 2);
- mDynamicUploader = nullptr;
+ bool supportsBaseInstance = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(4, 2);
- Tick();
+ // TODO(crbug.com/dawn/343): We can support the extension variants, but need to load the EXT
+ // procs without the extension suffix.
+ // We'll also need emulation of shader builtins gl_BaseVertex and gl_BaseInstance.
+
+ // supportsBaseVertex |=
+ // (gl.IsAtLeastGLES(2, 0) &&
+ // (gl.IsGLExtensionSupported("OES_draw_elements_base_vertex") ||
+ // gl.IsGLExtensionSupported("EXT_draw_elements_base_vertex"))) ||
+ // (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_draw_elements_base_vertex"));
+
+ // supportsBaseInstance |=
+ // (gl.IsAtLeastGLES(3, 1) && gl.IsGLExtensionSupported("EXT_base_instance")) ||
+ // (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_base_instance"));
+
+ // TODO(crbug.com/dawn/343): Investigate emulation.
+ SetToggle(Toggle::DisableBaseVertex, !supportsBaseVertex);
+ SetToggle(Toggle::DisableBaseInstance, !supportsBaseInstance);
}
const GLFormat& Device::GetGLFormat(const Format& format) {
@@ -66,7 +83,7 @@ namespace dawn_native { namespace opengl {
ResultOrError<BindGroupBase*> Device::CreateBindGroupImpl(
const BindGroupDescriptor* descriptor) {
- return new BindGroup(this, descriptor);
+ return BindGroup::Create(this, descriptor);
}
ResultOrError<BindGroupLayoutBase*> Device::CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) {
@@ -105,6 +122,12 @@ namespace dawn_native { namespace opengl {
const SwapChainDescriptor* descriptor) {
return new SwapChain(this, descriptor);
}
+ ResultOrError<NewSwapChainBase*> Device::CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ return DAWN_VALIDATION_ERROR("New swapchains not implemented.");
+ }
ResultOrError<TextureBase*> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
return new Texture(this, descriptor);
}
@@ -170,4 +193,23 @@ namespace dawn_native { namespace opengl {
return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer.");
}
+ void Device::Destroy() {
+ ASSERT(mLossStatus != LossStatus::AlreadyLost);
+
+ // Some operations might have been started since the last submit and waiting
+ // on a serial that doesn't have a corresponding fence enqueued. Force all
+ // operations to look as if they were completed (because they were).
+ mCompletedSerial = mLastSubmittedSerial + 1;
+
+ mDynamicUploader = nullptr;
+ }
+
+ MaybeError Device::WaitForIdleForDestruction() {
+ gl.Finish();
+ CheckPassedFences();
+ ASSERT(mFencesInFlight.empty());
+ Tick();
+ return {};
+ }
+
}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
index 757f27cd9d2..d9ec9c024f7 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
@@ -80,12 +80,19 @@ namespace dawn_native { namespace opengl {
const ShaderModuleDescriptor* descriptor) override;
ResultOrError<SwapChainBase*> CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) override;
+ ResultOrError<NewSwapChainBase*> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) override;
ResultOrError<TextureBase*> CreateTextureImpl(const TextureDescriptor* descriptor) override;
ResultOrError<TextureViewBase*> CreateTextureViewImpl(
TextureBase* texture,
const TextureViewDescriptor* descriptor) override;
+ void InitTogglesFromDriver();
void CheckPassedFences();
+ void Destroy() override;
+ MaybeError WaitForIdleForDestruction() override;
Serial mCompletedSerial = 0;
Serial mLastSubmittedSerial = 0;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/Forward.h b/chromium/third_party/dawn/src/dawn_native/opengl/Forward.h
index 6542ff90299..bd2cc76ca7a 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/Forward.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/Forward.h
@@ -17,17 +17,11 @@
#include "dawn_native/ToBackend.h"
-namespace {
- class BindGroupBase;
- class BindGroup;
- class RenderPassDescriptor;
-} // namespace
-
namespace dawn_native { namespace opengl {
class Adapter;
- using BindGroup = BindGroupBase;
- using BindGroupLayout = BindGroupLayoutBase;
+ class BindGroup;
+ class BindGroupLayout;
class Buffer;
class CommandBuffer;
class ComputePipeline;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLBackend.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLBackend.cpp
index fbab4152543..34513f5a11a 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLBackend.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLBackend.cpp
@@ -24,7 +24,7 @@
namespace dawn_native { namespace opengl {
AdapterDiscoveryOptions::AdapterDiscoveryOptions()
- : AdapterDiscoveryOptionsBase(BackendType::OpenGL) {
+ : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGL) {
}
DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.cpp
index 0bc5781a1aa..c54a0adb083 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.cpp
@@ -22,7 +22,7 @@ namespace dawn_native { namespace opengl {
MaybeError OpenGLFunctions::Initialize(GetProcAddress getProc) {
PFNGLGETSTRINGPROC getString = reinterpret_cast<PFNGLGETSTRINGPROC>(getProc("glGetString"));
if (getString == nullptr) {
- return DAWN_DEVICE_LOST_ERROR("Couldn't load glGetString");
+ return DAWN_INTERNAL_ERROR("Couldn't load glGetString");
}
std::string version = reinterpret_cast<const char*>(getString(GL_VERSION));
@@ -74,12 +74,12 @@ namespace dawn_native { namespace opengl {
return mSupportedGLExtensionsSet.count(extension) != 0;
}
- bool OpenGLFunctions::IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) {
+ bool OpenGLFunctions::IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const {
return mStandard == Standard::Desktop &&
std::tie(mMajorVersion, mMinorVersion) >= std::tie(majorVersion, minorVersion);
}
- bool OpenGLFunctions::IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) {
+ bool OpenGLFunctions::IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const {
return mStandard == Standard::ES &&
std::tie(mMajorVersion, mMinorVersion) >= std::tie(majorVersion, minorVersion);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.h b/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.h
index 14c7e9198a7..e51430067e8 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLFunctions.h
@@ -25,8 +25,8 @@ namespace dawn_native { namespace opengl {
public:
MaybeError Initialize(GetProcAddress getProc);
- bool IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion);
- bool IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion);
+ bool IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const;
+ bool IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const;
bool IsGLExtensionSupported(const char* extension) const;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
index d76b091fd6f..03d2897e022 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
@@ -15,13 +15,13 @@
#include "dawn_native/opengl/PipelineGL.h"
#include "common/BitSetIterator.h"
+#include "common/Log.h"
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/opengl/Forward.h"
#include "dawn_native/opengl/OpenGLFunctions.h"
#include "dawn_native/opengl/PipelineLayoutGL.h"
#include "dawn_native/opengl/ShaderModuleGL.h"
-#include <iostream>
#include <set>
namespace dawn_native { namespace opengl {
@@ -64,9 +64,8 @@ namespace dawn_native { namespace opengl {
if (infoLogLength > 1) {
std::vector<char> buffer(infoLogLength);
gl.GetShaderInfoLog(shader, infoLogLength, nullptr, &buffer[0]);
- std::cout << source << std::endl;
- std::cout << "Program compilation failed:\n";
- std::cout << buffer.data() << std::endl;
+ dawn::ErrorLog() << source << "\nProgram compilation failed:\n"
+ << buffer.data();
}
}
return shader;
@@ -97,8 +96,7 @@ namespace dawn_native { namespace opengl {
if (infoLogLength > 1) {
std::vector<char> buffer(infoLogLength);
gl.GetProgramInfoLog(mProgram, infoLogLength, nullptr, &buffer[0]);
- std::cout << "Program link failed:\n";
- std::cout << buffer.data() << std::endl;
+ dawn::ErrorLog() << "Program link failed:\n" << buffer.data();
}
}
@@ -109,30 +107,33 @@ namespace dawn_native { namespace opengl {
const auto& indices = layout->GetBindingIndexInfo();
for (uint32_t group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
- const auto& groupInfo = layout->GetBindGroupLayout(group)->GetBindingInfo();
+ const BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(group);
- for (uint32_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
- if (!groupInfo.mask[binding]) {
- continue;
- }
+ for (const auto& it : bgl->GetBindingMap()) {
+ BindingNumber bindingNumber = it.first;
+ BindingIndex bindingIndex = it.second;
- std::string name = GetBindingName(group, binding);
- switch (groupInfo.types[binding]) {
+ std::string name = GetBindingName(group, bindingNumber);
+ switch (bgl->GetBindingInfo(bindingIndex).type) {
case wgpu::BindingType::UniformBuffer: {
GLint location = gl.GetUniformBlockIndex(mProgram, name.c_str());
if (location != -1) {
- gl.UniformBlockBinding(mProgram, location, indices[group][binding]);
+ gl.UniformBlockBinding(mProgram, location,
+ indices[group][bindingIndex]);
}
- } break;
+ break;
+ }
- case wgpu::BindingType::StorageBuffer: {
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer: {
GLuint location = gl.GetProgramResourceIndex(
mProgram, GL_SHADER_STORAGE_BLOCK, name.c_str());
if (location != GL_INVALID_INDEX) {
gl.ShaderStorageBlockBinding(mProgram, location,
- indices[group][binding]);
+ indices[group][bindingIndex]);
}
- } break;
+ break;
+ }
case wgpu::BindingType::Sampler:
case wgpu::BindingType::SampledTexture:
@@ -141,7 +142,8 @@ namespace dawn_native { namespace opengl {
break;
case wgpu::BindingType::StorageTexture:
- case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
UNREACHABLE();
break;
@@ -177,11 +179,12 @@ namespace dawn_native { namespace opengl {
indices[combined.textureLocation.group][combined.textureLocation.binding];
mUnitsForTextures[textureIndex].push_back(textureUnit);
- wgpu::TextureComponentType componentType =
- layout->GetBindGroupLayout(combined.textureLocation.group)
- ->GetBindingInfo()
- .textureComponentTypes[combined.textureLocation.binding];
- bool shouldUseFiltering = componentType == wgpu::TextureComponentType::Float;
+ const BindGroupLayoutBase* bgl =
+ layout->GetBindGroupLayout(combined.textureLocation.group);
+ Format::Type componentType =
+ bgl->GetBindingInfo(bgl->GetBindingIndex(combined.textureLocation.binding))
+ .textureComponentType;
+ bool shouldUseFiltering = componentType == Format::Type::Float;
GLuint samplerIndex =
indices[combined.samplerLocation.group][combined.samplerLocation.binding];
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp
index 2884dfa5575..383a0796e1d 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp
@@ -28,34 +28,33 @@ namespace dawn_native { namespace opengl {
GLuint ssboIndex = 0;
for (uint32_t group : IterateBitSet(GetBindGroupLayoutsMask())) {
- const auto& groupInfo = GetBindGroupLayout(group)->GetBindingInfo();
+ const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
- for (size_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
- if (!groupInfo.mask[binding]) {
- continue;
- }
-
- switch (groupInfo.types[binding]) {
+ for (BindingIndex bindingIndex = 0; bindingIndex < bgl->GetBindingCount();
+ ++bindingIndex) {
+ switch (bgl->GetBindingInfo(bindingIndex).type) {
case wgpu::BindingType::UniformBuffer:
- mIndexInfo[group][binding] = uboIndex;
+ mIndexInfo[group][bindingIndex] = uboIndex;
uboIndex++;
break;
case wgpu::BindingType::Sampler:
- mIndexInfo[group][binding] = samplerIndex;
+ mIndexInfo[group][bindingIndex] = samplerIndex;
samplerIndex++;
break;
case wgpu::BindingType::SampledTexture:
- mIndexInfo[group][binding] = sampledTextureIndex;
+ mIndexInfo[group][bindingIndex] = sampledTextureIndex;
sampledTextureIndex++;
break;
case wgpu::BindingType::StorageBuffer:
- mIndexInfo[group][binding] = ssboIndex;
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ mIndexInfo[group][bindingIndex] = ssboIndex;
ssboIndex++;
break;
case wgpu::BindingType::StorageTexture:
- case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
UNREACHABLE();
break;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
index e1138420a04..4174fe413a3 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
@@ -76,7 +76,7 @@ namespace dawn_native { namespace opengl {
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
// If these options are changed, the values in DawnSPIRVCrossGLSLFastFuzzer.cpp need to
// be updated.
- shaderc_spvc::CompileOptions options;
+ shaderc_spvc::CompileOptions options = GetCompileOptions();
// The range of Z-coordinate in the clipping volume of OpenGL is [-w, w], while it is
// [0, w] in D3D12, Metal and Vulkan, so we should normalize it in shaders in all
@@ -91,12 +91,11 @@ namespace dawn_native { namespace opengl {
#else
options.SetGLSLLanguageVersion(440);
#endif
- shaderc_spvc_status status =
- mSpvcContext.InitializeForGlsl(descriptor->code, descriptor->codeSize, options);
- if (status != shaderc_spvc_status_success)
- return DAWN_VALIDATION_ERROR("Unable to initialize instance of spvc");
-
- compiler = reinterpret_cast<spirv_cross::CompilerGLSL*>(mSpvcContext.GetCompiler());
+ DAWN_TRY(CheckSpvcSuccess(
+ mSpvcContext.InitializeForGlsl(descriptor->code, descriptor->codeSize, options),
+ "Unable to initialize instance of spvc"));
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.GetCompiler(reinterpret_cast<void**>(&compiler)),
+ "Unable to get cross compiler"));
} else {
// If these options are changed, the values in DawnSPIRVCrossGLSLFastFuzzer.cpp need to
// be updated.
@@ -122,38 +121,68 @@ namespace dawn_native { namespace opengl {
compiler->set_common_options(options);
}
- ExtractSpirvInfo(*compiler);
+ DAWN_TRY(ExtractSpirvInfo(*compiler));
- const auto& bindingInfo = GetBindingInfo();
+ const ShaderModuleBase::ModuleBindingInfo& bindingInfo = GetBindingInfo();
// Extract bindings names so that it can be used to get its location in program.
// Now translate the separate sampler / textures into combined ones and store their info.
// We need to do this before removing the set and binding decorations.
- compiler->build_combined_image_samplers();
-
- for (const auto& combined : compiler->get_combined_image_samplers()) {
- mCombinedInfo.emplace_back();
-
- auto& info = mCombinedInfo.back();
- info.samplerLocation.group =
- compiler->get_decoration(combined.sampler_id, spv::DecorationDescriptorSet);
- info.samplerLocation.binding =
- compiler->get_decoration(combined.sampler_id, spv::DecorationBinding);
- info.textureLocation.group =
- compiler->get_decoration(combined.image_id, spv::DecorationDescriptorSet);
- info.textureLocation.binding =
- compiler->get_decoration(combined.image_id, spv::DecorationBinding);
- compiler->set_name(combined.combined_id, info.GetName());
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ mSpvcContext.BuildCombinedImageSamplers();
+ } else {
+ compiler->build_combined_image_samplers();
+ }
+
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ std::vector<shaderc_spvc_combined_image_sampler> samplers;
+ mSpvcContext.GetCombinedImageSamplers(&samplers);
+ for (auto sampler : samplers) {
+ mCombinedInfo.emplace_back();
+ auto& info = mCombinedInfo.back();
+
+ mSpvcContext.GetDecoration(sampler.sampler_id,
+ shaderc_spvc_decoration_descriptorset,
+ &info.samplerLocation.group);
+ mSpvcContext.GetDecoration(sampler.sampler_id, shaderc_spvc_decoration_binding,
+ &info.samplerLocation.binding);
+ mSpvcContext.GetDecoration(sampler.image_id, shaderc_spvc_decoration_descriptorset,
+ &info.textureLocation.group);
+ mSpvcContext.GetDecoration(sampler.image_id, shaderc_spvc_decoration_binding,
+ &info.textureLocation.binding);
+ mSpvcContext.SetName(sampler.combined_id, info.GetName());
+ }
+ } else {
+ for (const auto& combined : compiler->get_combined_image_samplers()) {
+ mCombinedInfo.emplace_back();
+
+ auto& info = mCombinedInfo.back();
+ info.samplerLocation.group =
+ compiler->get_decoration(combined.sampler_id, spv::DecorationDescriptorSet);
+ info.samplerLocation.binding =
+ compiler->get_decoration(combined.sampler_id, spv::DecorationBinding);
+ info.textureLocation.group =
+ compiler->get_decoration(combined.image_id, spv::DecorationDescriptorSet);
+ info.textureLocation.binding =
+ compiler->get_decoration(combined.image_id, spv::DecorationBinding);
+ compiler->set_name(combined.combined_id, info.GetName());
+ }
}
// Change binding names to be "dawn_binding_<group>_<binding>".
// Also unsets the SPIRV "Binding" decoration as it outputs "layout(binding=)" which
// isn't supported on OSX's OpenGL.
for (uint32_t group = 0; group < kMaxBindGroups; ++group) {
- for (uint32_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
- const auto& info = bindingInfo[group][binding];
- if (info.used) {
- compiler->set_name(info.base_type_id, GetBindingName(group, binding));
+ for (const auto& it : bindingInfo[group]) {
+ BindingNumber bindingNumber = it.first;
+ const auto& info = it.second;
+
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ mSpvcContext.SetName(info.base_type_id, GetBindingName(group, bindingNumber));
+ mSpvcContext.UnsetDecoration(info.id, shaderc_spvc_decoration_binding);
+ mSpvcContext.UnsetDecoration(info.id, shaderc_spvc_decoration_descriptorset);
+ } else {
+ compiler->set_name(info.base_type_id, GetBindingName(group, bindingNumber));
compiler->unset_decoration(info.id, spv::DecorationBinding);
compiler->unset_decoration(info.id, spv::DecorationDescriptorSet);
}
@@ -162,10 +191,10 @@ namespace dawn_native { namespace opengl {
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
shaderc_spvc::CompilationResult result;
- shaderc_spvc_status status = mSpvcContext.CompileShader(&result);
- if (status != shaderc_spvc_status_success)
- return DAWN_VALIDATION_ERROR("Unable to compile shader using spvc");
- mGlslSource = result.GetStringOutput();
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.CompileShader(&result),
+ "Unable to compile GLSL shader using spvc"));
+ DAWN_TRY(CheckSpvcSuccess(result.GetStringOutput(&mGlslSource),
+ "Unable to get GLSL shader text"));
} else {
mGlslSource = compiler->compile();
}
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp
index ea72bca1ac9..40efd0a27df 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp
@@ -23,7 +23,7 @@
namespace dawn_native { namespace opengl {
SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : SwapChainBase(device, descriptor) {
+ : OldSwapChainBase(device, descriptor) {
const auto& im = GetImplementation();
im.Init(im.userData, nullptr);
}
@@ -36,7 +36,7 @@ namespace dawn_native { namespace opengl {
DawnSwapChainNextTexture next = {};
DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
if (error) {
- GetDevice()->HandleError(wgpu::ErrorType::Unknown, error);
+ GetDevice()->HandleError(InternalErrorType::Internal, error);
return nullptr;
}
GLuint nativeTexture = next.texture.u32;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h
index d4df7d3a091..2fbe2c86223 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.h
@@ -23,7 +23,7 @@ namespace dawn_native { namespace opengl {
class Device;
- class SwapChain : public SwapChainBase {
+ class SwapChain : public OldSwapChainBase {
public:
SwapChain(Device* device, const SwapChainDescriptor* descriptor);
~SwapChain();
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
index 7647d21c348..30bb0d5ba1b 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
@@ -184,7 +184,7 @@ namespace dawn_native { namespace opengl {
MaybeError Texture::ClearTexture(GLint baseMipLevel,
GLint levelCount,
GLint baseArrayLayer,
- uint32_t layerCount,
+ GLint layerCount,
TextureBase::ClearValue clearValue) {
// TODO(jiawei.shao@intel.com): initialize the textures with compressed formats.
if (GetFormat().isCompressed) {
@@ -193,12 +193,15 @@ namespace dawn_native { namespace opengl {
Device* device = ToBackend(GetDevice());
const OpenGLFunctions& gl = device->gl;
+
uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+ float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
+
if (GetFormat().isRenderable) {
if (GetFormat().HasDepthOrStencil()) {
bool doDepthClear = GetFormat().HasDepth();
bool doStencilClear = GetFormat().HasStencil();
- GLfloat depth = clearColor;
+ GLfloat depth = fClearColor;
GLint stencil = clearColor;
if (doDepthClear) {
gl.DepthMask(GL_TRUE);
@@ -207,19 +210,55 @@ namespace dawn_native { namespace opengl {
gl.StencilMask(GetStencilMaskFromStencilFormat(GetFormat().format));
}
+ auto DoClear = [&]() {
+ if (doDepthClear && doStencilClear) {
+ gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, depth, stencil);
+ } else if (doDepthClear) {
+ gl.ClearBufferfv(GL_DEPTH, 0, &depth);
+ } else if (doStencilClear) {
+ gl.ClearBufferiv(GL_STENCIL, 0, &stencil);
+ }
+ };
+
GLuint framebuffer = 0;
gl.GenFramebuffers(1, &framebuffer);
gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
- // TODO(natlee@microsoft.com): clear all mip levels and array layers.
- gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
- GetGLTarget(), GetHandle(), 0);
- if (doDepthClear && doStencilClear) {
- gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, depth, stencil);
- } else if (doDepthClear) {
- gl.ClearBufferfv(GL_DEPTH, 0, &depth);
- } else if (doStencilClear) {
- gl.ClearBufferiv(GL_STENCIL, 0, &stencil);
+
+ for (GLint level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ switch (GetDimension()) {
+ case wgpu::TextureDimension::e2D:
+ if (GetArrayLayers() == 1) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(level, 1, 0, 1)) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+ gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER,
+ GL_DEPTH_STENCIL_ATTACHMENT, GetGLTarget(),
+ GetHandle(), level);
+ DoClear();
+ } else {
+ for (GLint layer = baseArrayLayer;
+ layer < baseArrayLayer + layerCount; ++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+
+ gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER,
+ GL_DEPTH_STENCIL_ATTACHMENT,
+ GetHandle(), level, layer);
+ DoClear();
+ }
+ }
+ break;
+
+ default:
+ UNREACHABLE();
+ }
}
+
gl.DeleteFramebuffers(1, &framebuffer);
} else {
static constexpr uint32_t MAX_TEXEL_SIZE = 16;
@@ -231,9 +270,17 @@ namespace dawn_native { namespace opengl {
const GLFormat& glFormat = GetGLFormat();
for (GLint level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
Extent3D mipSize = GetMipLevelPhysicalSize(level);
- gl.ClearTexSubImage(mHandle, level, 0, 0, baseArrayLayer, mipSize.width,
- mipSize.height, layerCount, glFormat.format, glFormat.type,
- clearColorData.data());
+ for (GLint layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
+ ++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+ gl.ClearTexSubImage(mHandle, level, 0, 0, layer, mipSize.width,
+ mipSize.height, 1, glFormat.format, glFormat.type,
+ clearColorData.data());
+ }
}
}
} else {
@@ -265,8 +312,7 @@ namespace dawn_native { namespace opengl {
// Fill the buffer with clear color
uint8_t* clearBuffer = nullptr;
DAWN_TRY(srcBuffer->MapAtCreation(&clearBuffer));
- std::fill(reinterpret_cast<uint32_t*>(clearBuffer),
- reinterpret_cast<uint32_t*>(clearBuffer + descriptor.size), clearColor);
+ memset(clearBuffer, clearColor, descriptor.size);
srcBuffer->Unmap();
// Bind buffer and texture, and make the buffer to texture copy
@@ -281,21 +327,42 @@ namespace dawn_native { namespace opengl {
Extent3D size = GetMipLevelPhysicalSize(level);
switch (GetDimension()) {
case wgpu::TextureDimension::e2D:
- // TODO(natlee@microsoft.com): This will break when layerCount is greater
- // than 1, because the buffer is only sized for one layer.
- ASSERT(layerCount == 1);
- gl.TexSubImage2D(GetGLTarget(), level, 0, 0, size.width, size.height,
- GetGLFormat().format, GetGLFormat().type, 0);
+ if (GetArrayLayers() == 1) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(level, 1, 0, 1)) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+ gl.TexSubImage2D(GetGLTarget(), level, 0, 0, size.width, size.height,
+ GetGLFormat().format, GetGLFormat().type, 0);
+ } else {
+ for (GLint layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
+ ++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+ gl.TexSubImage3D(GetGLTarget(), level, 0, 0, layer, size.width,
+ size.height, 1, GetGLFormat().format,
+ GetGLFormat().type, 0);
+ }
+ }
break;
default:
UNREACHABLE();
}
- gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
- gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
-
- gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
+ gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+ gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+
+ gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+ }
+ if (clearValue == TextureBase::ClearValue::Zero) {
+ SetIsSubresourceContentInitialized(true, baseMipLevel, levelCount, baseArrayLayer,
+ layerCount);
+ device->IncrementLazyClearCountForTesting();
}
return {};
}
@@ -303,8 +370,7 @@ namespace dawn_native { namespace opengl {
void Texture::EnsureSubresourceContentInitialized(uint32_t baseMipLevel,
uint32_t levelCount,
uint32_t baseArrayLayer,
- uint32_t layerCount,
- bool isLazyClear) {
+ uint32_t layerCount) {
if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
return;
}
@@ -312,11 +378,6 @@ namespace dawn_native { namespace opengl {
layerCount)) {
GetDevice()->ConsumedError(ClearTexture(baseMipLevel, levelCount, baseArrayLayer,
layerCount, TextureBase::ClearValue::Zero));
- if (isLazyClear) {
- GetDevice()->IncrementLazyClearCountForTesting();
- }
- SetIsSubresourceContentInitialized(true, baseMipLevel, levelCount, baseArrayLayer,
- layerCount);
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h
index b72c4fc2ec5..8e18116ae4b 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.h
@@ -40,15 +40,14 @@ namespace dawn_native { namespace opengl {
void EnsureSubresourceContentInitialized(uint32_t baseMipLevel,
uint32_t levelCount,
uint32_t baseArrayLayer,
- uint32_t layerCount,
- bool isLazyClear = true);
+ uint32_t layerCount);
private:
void DestroyImpl() override;
MaybeError ClearTexture(GLint baseMipLevel,
GLint levelCount,
GLint baseArrayLayer,
- uint32_t layerCount,
+ GLint layerCount,
TextureBase::ClearValue clearValue);
GLuint mHandle;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
index dd9341f71ca..d138cef565e 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/AdapterVk.cpp
@@ -20,7 +20,7 @@
namespace dawn_native { namespace vulkan {
Adapter::Adapter(Backend* backend, VkPhysicalDevice physicalDevice)
- : AdapterBase(backend->GetInstance(), BackendType::Vulkan),
+ : AdapterBase(backend->GetInstance(), wgpu::BackendType::Vulkan),
mPhysicalDevice(physicalDevice),
mBackend(backend) {
}
@@ -39,9 +39,8 @@ namespace dawn_native { namespace vulkan {
MaybeError Adapter::Initialize() {
DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
- if (!mDeviceInfo.maintenance1 &&
- mDeviceInfo.properties.apiVersion < VK_MAKE_VERSION(1, 1, 0)) {
- return DAWN_DEVICE_LOST_ERROR(
+ if (!mDeviceInfo.maintenance1) {
+ return DAWN_INTERNAL_ERROR(
"Dawn requires Vulkan 1.1 or Vulkan 1.0 with KHR_Maintenance1 in order to support "
"viewport flipY");
}
@@ -54,16 +53,16 @@ namespace dawn_native { namespace vulkan {
switch (mDeviceInfo.properties.deviceType) {
case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
- mDeviceType = DeviceType::IntegratedGPU;
+ mAdapterType = wgpu::AdapterType::IntegratedGPU;
break;
case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
- mDeviceType = DeviceType::DiscreteGPU;
+ mAdapterType = wgpu::AdapterType::DiscreteGPU;
break;
case VK_PHYSICAL_DEVICE_TYPE_CPU:
- mDeviceType = DeviceType::CPU;
+ mAdapterType = wgpu::AdapterType::CPU;
break;
default:
- mDeviceType = DeviceType::Unknown;
+ mAdapterType = wgpu::AdapterType::Unknown;
break;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
index 06a2c3417e2..aa7795169c5 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
@@ -14,31 +14,46 @@
#include "dawn_native/vulkan/BackendVk.h"
+#include "common/Log.h"
#include "common/SystemUtils.h"
#include "dawn_native/Instance.h"
#include "dawn_native/VulkanBackend.h"
#include "dawn_native/vulkan/AdapterVk.h"
#include "dawn_native/vulkan/VulkanError.h"
-#include <iostream>
+// TODO(crbug.com/dawn/283): Link against the Vulkan Loader and remove this.
+#if defined(DAWN_ENABLE_SWIFTSHADER)
+# if defined(DAWN_PLATFORM_LINUX) || defined(DAWN_PLATFORM_FUSCHIA)
+constexpr char kSwiftshaderLibName[] = "libvk_swiftshader.so";
+# elif defined(DAWN_PLATFORM_WINDOWS)
+constexpr char kSwiftshaderLibName[] = "vk_swiftshader.dll";
+# elif defined(DAWN_PLATFORM_MACOS)
+constexpr char kSwiftshaderLibName[] = "libvk_swiftshader.dylib";
+# else
+# error "Unimplemented Swiftshader Vulkan backend platform"
+# endif
+#endif
#if defined(DAWN_PLATFORM_LINUX)
# if defined(DAWN_PLATFORM_ANDROID)
-const char kVulkanLibName[] = "libvulkan.so";
+constexpr char kVulkanLibName[] = "libvulkan.so";
# else
-const char kVulkanLibName[] = "libvulkan.so.1";
+constexpr char kVulkanLibName[] = "libvulkan.so.1";
# endif
#elif defined(DAWN_PLATFORM_WINDOWS)
-const char kVulkanLibName[] = "vulkan-1.dll";
+constexpr char kVulkanLibName[] = "vulkan-1.dll";
#elif defined(DAWN_PLATFORM_FUCHSIA)
-const char kVulkanLibName[] = "libvulkan.so";
+constexpr char kVulkanLibName[] = "libvulkan.so";
+#elif defined(DAWN_ENABLE_SWIFTSHADER)
+const char* kVulkanLibName = kSwiftshaderLibName;
#else
# error "Unimplemented Vulkan backend platform"
#endif
namespace dawn_native { namespace vulkan {
- Backend::Backend(InstanceBase* instance) : BackendConnection(instance, BackendType::Vulkan) {
+ Backend::Backend(InstanceBase* instance)
+ : BackendConnection(instance, wgpu::BackendType::Vulkan) {
}
Backend::~Backend() {
@@ -66,12 +81,12 @@ namespace dawn_native { namespace vulkan {
return mGlobalInfo;
}
- MaybeError Backend::Initialize() {
+ MaybeError Backend::LoadVulkan() {
#if defined(DAWN_ENABLE_VULKAN_VALIDATION_LAYERS)
if (GetInstance()->IsBackendValidationEnabled()) {
std::string vkDataDir = GetExecutableDirectory() + DAWN_VK_DATA_DIR;
if (!SetEnvironmentVar("VK_LAYER_PATH", vkDataDir.c_str())) {
- return DAWN_DEVICE_LOST_ERROR("Couldn't set VK_LAYER_PATH");
+ return DAWN_INTERNAL_ERROR("Couldn't set VK_LAYER_PATH");
}
}
#endif
@@ -79,13 +94,29 @@ namespace dawn_native { namespace vulkan {
std::string fullSwiftshaderICDPath =
GetExecutableDirectory() + DAWN_SWIFTSHADER_VK_ICD_JSON;
if (!SetEnvironmentVar("VK_ICD_FILENAMES", fullSwiftshaderICDPath.c_str())) {
- return DAWN_DEVICE_LOST_ERROR("Couldn't set VK_ICD_FILENAMES");
+ return DAWN_INTERNAL_ERROR("Couldn't set VK_ICD_FILENAMES");
}
#endif
- if (!mVulkanLib.Open(kVulkanLibName)) {
- return DAWN_DEVICE_LOST_ERROR(std::string("Couldn't open ") + kVulkanLibName);
+ if (mVulkanLib.Open(kVulkanLibName)) {
+ return {};
}
+ dawn::WarningLog() << std::string("Couldn't open ") + kVulkanLibName;
+
+#if defined(DAWN_ENABLE_SWIFTSHADER)
+ if (strcmp(kVulkanLibName, kSwiftshaderLibName) != 0) {
+ if (mVulkanLib.Open(kSwiftshaderLibName)) {
+ return {};
+ }
+ dawn::WarningLog() << std::string("Couldn't open ") + kSwiftshaderLibName;
+ }
+#endif
+
+ return DAWN_INTERNAL_ERROR("Couldn't load Vulkan");
+ }
+
+ MaybeError Backend::Initialize() {
+ DAWN_TRY(LoadVulkan());
DAWN_TRY(mFunctions.LoadGlobalProcs(mVulkanLib));
@@ -159,28 +190,15 @@ namespace dawn_native { namespace vulkan {
}
}
+ // Always request all extensions used to create VkSurfaceKHR objects so that they are
+ // always available for embedders looking to create VkSurfaceKHR on our VkInstance.
if (mGlobalInfo.fuchsiaImagePipeSwapchain) {
layersToRequest.push_back(kLayerNameFuchsiaImagePipeSwapchain);
usedKnobs.fuchsiaImagePipeSwapchain = true;
}
-
- // Always request all extensions used to create VkSurfaceKHR objects so that they are
- // always available for embedders looking to create VkSurfaceKHR on our VkInstance.
- if (mGlobalInfo.macosSurface) {
- extensionsToRequest.push_back(kExtensionNameMvkMacosSurface);
- usedKnobs.macosSurface = true;
- }
- if (mGlobalInfo.externalMemoryCapabilities) {
- extensionsToRequest.push_back(kExtensionNameKhrExternalMemoryCapabilities);
- usedKnobs.externalMemoryCapabilities = true;
- }
- if (mGlobalInfo.externalSemaphoreCapabilities) {
- extensionsToRequest.push_back(kExtensionNameKhrExternalSemaphoreCapabilities);
- usedKnobs.externalSemaphoreCapabilities = true;
- }
- if (mGlobalInfo.getPhysicalDeviceProperties2) {
- extensionsToRequest.push_back(kExtensionNameKhrGetPhysicalDeviceProperties2);
- usedKnobs.getPhysicalDeviceProperties2 = true;
+ if (mGlobalInfo.metalSurface) {
+ extensionsToRequest.push_back(kExtensionNameExtMetalSurface);
+ usedKnobs.metalSurface = true;
}
if (mGlobalInfo.surface) {
extensionsToRequest.push_back(kExtensionNameKhrSurface);
@@ -207,6 +225,28 @@ namespace dawn_native { namespace vulkan {
usedKnobs.fuchsiaImagePipeSurface = true;
}
+ // Mark the promoted extensions as present if the core version in which they were promoted
+ // is used. This allows having a single boolean that checks if the functionality from that
+ // extension is available (instead of checking extension || coreVersion).
+ if (mGlobalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ usedKnobs.getPhysicalDeviceProperties2 = true;
+ usedKnobs.externalMemoryCapabilities = true;
+ usedKnobs.externalSemaphoreCapabilities = true;
+ } else {
+ if (mGlobalInfo.externalMemoryCapabilities) {
+ extensionsToRequest.push_back(kExtensionNameKhrExternalMemoryCapabilities);
+ usedKnobs.externalMemoryCapabilities = true;
+ }
+ if (mGlobalInfo.externalSemaphoreCapabilities) {
+ extensionsToRequest.push_back(kExtensionNameKhrExternalSemaphoreCapabilities);
+ usedKnobs.externalSemaphoreCapabilities = true;
+ }
+ if (mGlobalInfo.getPhysicalDeviceProperties2) {
+ extensionsToRequest.push_back(kExtensionNameKhrGetPhysicalDeviceProperties2);
+ usedKnobs.getPhysicalDeviceProperties2 = true;
+ }
+ }
+
VkApplicationInfo appInfo;
appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
appInfo.pNext = nullptr;
@@ -241,7 +281,7 @@ namespace dawn_native { namespace vulkan {
createInfo.pUserData = this;
return CheckVkSuccess(mFunctions.CreateDebugReportCallbackEXT(
- mInstance, &createInfo, nullptr, &mDebugReportCallback),
+ mInstance, &createInfo, nullptr, &*mDebugReportCallback),
"vkCreateDebugReportcallback");
}
@@ -254,7 +294,7 @@ namespace dawn_native { namespace vulkan {
const char* /*pLayerPrefix*/,
const char* pMessage,
void* /*pUserdata*/) {
- std::cout << pMessage << std::endl;
+ dawn::WarningLog() << pMessage;
ASSERT((flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) == 0);
return VK_FALSE;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.h
index a0606b7c7ce..57a25b21d0c 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.h
@@ -37,6 +37,7 @@ namespace dawn_native { namespace vulkan {
std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() override;
private:
+ MaybeError LoadVulkan();
ResultOrError<VulkanGlobalKnobs> CreateInstance();
MaybeError RegisterDebugReport();
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
index 8408698e167..d69449244a4 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/vulkan/BindGroupLayoutVk.h"
#include "common/BitSetIterator.h"
+#include "dawn_native/vulkan/BindGroupVk.h"
#include "dawn_native/vulkan/DescriptorSetService.h"
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
@@ -56,10 +57,15 @@ namespace dawn_native { namespace vulkan {
case wgpu::BindingType::SampledTexture:
return VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
if (isDynamic) {
return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
}
return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
+ return VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
+ case wgpu::BindingType::StorageTexture:
default:
UNREACHABLE();
}
@@ -76,21 +82,23 @@ namespace dawn_native { namespace vulkan {
}
MaybeError BindGroupLayout::Initialize() {
- const LayoutBindingInfo& info = GetBindingInfo();
-
// Compute the bindings that will be chained in the DescriptorSetLayout create info. We add
// one entry per binding set. This might be optimized by computing continuous ranges of
// bindings of the same type.
uint32_t numBindings = 0;
std::array<VkDescriptorSetLayoutBinding, kMaxBindingsPerGroup> bindings;
- for (uint32_t bindingIndex : IterateBitSet(info.mask)) {
- VkDescriptorSetLayoutBinding* binding = &bindings[numBindings];
- binding->binding = bindingIndex;
- binding->descriptorType =
- VulkanDescriptorType(info.types[bindingIndex], info.hasDynamicOffset[bindingIndex]);
- binding->descriptorCount = 1;
- binding->stageFlags = VulkanShaderStageFlags(info.visibilities[bindingIndex]);
- binding->pImmutableSamplers = nullptr;
+ for (const auto& it : GetBindingMap()) {
+ BindingNumber bindingNumber = it.first;
+ BindingIndex bindingIndex = it.second;
+ const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+
+ VkDescriptorSetLayoutBinding* vkBinding = &bindings[numBindings];
+ vkBinding->binding = bindingNumber;
+ vkBinding->descriptorType =
+ VulkanDescriptorType(bindingInfo.type, bindingInfo.hasDynamicOffset);
+ vkBinding->descriptorCount = 1;
+ vkBinding->stageFlags = VulkanShaderStageFlags(bindingInfo.visibility);
+ vkBinding->pImmutableSamplers = nullptr;
numBindings++;
}
@@ -104,15 +112,16 @@ namespace dawn_native { namespace vulkan {
Device* device = ToBackend(GetDevice());
DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorSetLayout(
- device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
"CreateDescriptorSetLayout"));
// Compute the size of descriptor pools used for this layout.
std::map<VkDescriptorType, uint32_t> descriptorCountPerType;
- for (uint32_t bindingIndex : IterateBitSet(info.mask)) {
+ for (BindingIndex bindingIndex = 0; bindingIndex < GetBindingCount(); ++bindingIndex) {
+ const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
VkDescriptorType vulkanType =
- VulkanDescriptorType(info.types[bindingIndex], info.hasDynamicOffset[bindingIndex]);
+ VulkanDescriptorType(bindingInfo.type, bindingInfo.hasDynamicOffset);
// map::operator[] will return 0 if the key doesn't exist.
descriptorCountPerType[vulkanType]++;
@@ -126,6 +135,12 @@ namespace dawn_native { namespace vulkan {
return {};
}
+ BindGroupLayout::BindGroupLayout(DeviceBase* device,
+ const BindGroupLayoutDescriptor* descriptor)
+ : BindGroupLayoutBase(device, descriptor),
+ mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
+ }
+
BindGroupLayout::~BindGroupLayout() {
Device* device = ToBackend(GetDevice());
@@ -147,7 +162,19 @@ namespace dawn_native { namespace vulkan {
return mHandle;
}
- ResultOrError<DescriptorSetAllocation> BindGroupLayout::AllocateOneSet() {
+ ResultOrError<BindGroup*> BindGroupLayout::AllocateBindGroup(
+ Device* device,
+ const BindGroupDescriptor* descriptor) {
+ DescriptorSetAllocation descriptorSetAllocation;
+ DAWN_TRY_ASSIGN(descriptorSetAllocation, AllocateOneDescriptorSet());
+ return mBindGroupAllocator.Allocate(device, descriptor, descriptorSetAllocation);
+ }
+
+ void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
+ mBindGroupAllocator.Deallocate(bindGroup);
+ }
+
+ ResultOrError<DescriptorSetAllocation> BindGroupLayout::AllocateOneDescriptorSet() {
Device* device = ToBackend(GetDevice());
// Reuse a previous allocation if available.
@@ -170,7 +197,7 @@ namespace dawn_native { namespace vulkan {
VkDescriptorPool descriptorPool;
DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorPool(device->GetVkDevice(), &createInfo,
- nullptr, &descriptorPool),
+ nullptr, &*descriptorPool),
"CreateDescriptorPool"));
// Allocate our single set.
@@ -179,12 +206,13 @@ namespace dawn_native { namespace vulkan {
allocateInfo.pNext = nullptr;
allocateInfo.descriptorPool = descriptorPool;
allocateInfo.descriptorSetCount = 1;
- allocateInfo.pSetLayouts = &mHandle;
+ allocateInfo.pSetLayouts = &*mHandle;
VkDescriptorSet descriptorSet;
- MaybeError result = CheckVkSuccess(
- device->fn.AllocateDescriptorSets(device->GetVkDevice(), &allocateInfo, &descriptorSet),
- "AllocateDescriptorSets");
+ MaybeError result =
+ CheckVkSuccess(device->fn.AllocateDescriptorSets(device->GetVkDevice(), &allocateInfo,
+ &*descriptorSet),
+ "AllocateDescriptorSets");
if (result.IsError()) {
// On an error we can destroy the pool immediately because no command references it.
@@ -196,16 +224,17 @@ namespace dawn_native { namespace vulkan {
return {{mAllocations.size() - 1, descriptorSet}};
}
- void BindGroupLayout::Deallocate(DescriptorSetAllocation* allocation) {
+ void BindGroupLayout::DeallocateDescriptorSet(
+ DescriptorSetAllocation* descriptorSetAllocation) {
// We can't reuse the descriptor set right away because the Vulkan spec says in the
// documentation for vkCmdBindDescriptorSets that the set may be consumed any time between
// host execution of the command and the end of the draw/dispatch.
ToBackend(GetDevice())
->GetDescriptorSetService()
- ->AddDeferredDeallocation(this, allocation->index);
+ ->AddDeferredDeallocation(this, descriptorSetAllocation->index);
// Clear the content of allocation so that use after frees are more visible.
- *allocation = {};
+ *descriptorSetAllocation = {};
}
void BindGroupLayout::FinishDeallocation(size_t index) {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
index 947f29d6560..47d8abef57d 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
@@ -17,12 +17,14 @@
#include "dawn_native/BindGroupLayout.h"
+#include "common/SlabAllocator.h"
#include "common/vulkan_platform.h"
#include <vector>
namespace dawn_native { namespace vulkan {
+ class BindGroup;
class Device;
VkDescriptorType VulkanDescriptorType(wgpu::BindingType type, bool isDynamic);
@@ -49,18 +51,23 @@ namespace dawn_native { namespace vulkan {
public:
static ResultOrError<BindGroupLayout*> Create(Device* device,
const BindGroupLayoutDescriptor* descriptor);
+
+ BindGroupLayout(DeviceBase* device, const BindGroupLayoutDescriptor* descriptor);
~BindGroupLayout();
VkDescriptorSetLayout GetHandle() const;
- ResultOrError<DescriptorSetAllocation> AllocateOneSet();
- void Deallocate(DescriptorSetAllocation* allocation);
+ ResultOrError<BindGroup*> AllocateBindGroup(Device* device,
+ const BindGroupDescriptor* descriptor);
+ void DeallocateBindGroup(BindGroup* bindGroup);
+
+ ResultOrError<DescriptorSetAllocation> AllocateOneDescriptorSet();
+ void DeallocateDescriptorSet(DescriptorSetAllocation* descriptorSetAllocation);
// Interaction with the DescriptorSetService.
void FinishDeallocation(size_t index);
private:
- using BindGroupLayoutBase::BindGroupLayoutBase;
MaybeError Initialize();
std::vector<VkDescriptorPoolSize> mPoolSizes;
@@ -74,6 +81,8 @@ namespace dawn_native { namespace vulkan {
std::vector<size_t> mAvailableAllocations;
VkDescriptorSetLayout mHandle = VK_NULL_HANDLE;
+
+ SlabAllocator<BindGroup> mBindGroupAllocator;
};
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
index 5c56030beaf..018fa05de42 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
@@ -28,16 +28,14 @@ namespace dawn_native { namespace vulkan {
// static
ResultOrError<BindGroup*> BindGroup::Create(Device* device,
const BindGroupDescriptor* descriptor) {
- std::unique_ptr<BindGroup> group = std::make_unique<BindGroup>(device, descriptor);
- DAWN_TRY(group->Initialize());
- return group.release();
+ return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
}
- MaybeError BindGroup::Initialize() {
- Device* device = ToBackend(GetDevice());
-
- DAWN_TRY_ASSIGN(mAllocation, ToBackend(GetLayout())->AllocateOneSet());
-
+ BindGroup::BindGroup(Device* device,
+ const BindGroupDescriptor* descriptor,
+ DescriptorSetAllocation descriptorSetAllocation)
+ : BindGroupBase(this, device, descriptor),
+ mDescriptorSetAllocation(descriptorSetAllocation) {
// Now do a write of a single descriptor set with all possible chained data allocated on the
// stack.
uint32_t numWrites = 0;
@@ -45,34 +43,40 @@ namespace dawn_native { namespace vulkan {
std::array<VkDescriptorBufferInfo, kMaxBindingsPerGroup> writeBufferInfo;
std::array<VkDescriptorImageInfo, kMaxBindingsPerGroup> writeImageInfo;
- const auto& layoutInfo = GetLayout()->GetBindingInfo();
- for (uint32_t bindingIndex : IterateBitSet(layoutInfo.mask)) {
+ for (const auto& it : GetLayout()->GetBindingMap()) {
+ BindingNumber bindingNumber = it.first;
+ BindingIndex bindingIndex = it.second;
+ const BindingInfo& bindingInfo = GetLayout()->GetBindingInfo(bindingIndex);
+
auto& write = writes[numWrites];
write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write.pNext = nullptr;
- write.dstSet = mAllocation.set;
- write.dstBinding = bindingIndex;
+ write.dstSet = GetHandle();
+ write.dstBinding = bindingNumber;
write.dstArrayElement = 0;
write.descriptorCount = 1;
- write.descriptorType = VulkanDescriptorType(layoutInfo.types[bindingIndex],
- layoutInfo.hasDynamicOffset[bindingIndex]);
+ write.descriptorType =
+ VulkanDescriptorType(bindingInfo.type, bindingInfo.hasDynamicOffset);
- switch (layoutInfo.types[bindingIndex]) {
+ switch (bindingInfo.type) {
case wgpu::BindingType::UniformBuffer:
- case wgpu::BindingType::StorageBuffer: {
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer: {
BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
writeBufferInfo[numWrites].buffer = ToBackend(binding.buffer)->GetHandle();
writeBufferInfo[numWrites].offset = binding.offset;
writeBufferInfo[numWrites].range = binding.size;
write.pBufferInfo = &writeBufferInfo[numWrites];
- } break;
+ break;
+ }
case wgpu::BindingType::Sampler: {
Sampler* sampler = ToBackend(GetBindingAsSampler(bindingIndex));
writeImageInfo[numWrites].sampler = sampler->GetHandle();
write.pImageInfo = &writeImageInfo[numWrites];
- } break;
+ break;
+ }
case wgpu::BindingType::SampledTexture: {
TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
@@ -84,8 +88,19 @@ namespace dawn_native { namespace vulkan {
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
write.pImageInfo = &writeImageInfo[numWrites];
- } break;
+ break;
+ }
+
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture: {
+ TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+ writeImageInfo[numWrites].imageView = view->GetHandle();
+ writeImageInfo[numWrites].imageLayout = VK_IMAGE_LAYOUT_GENERAL;
+
+ write.pImageInfo = &writeImageInfo[numWrites];
+ break;
+ }
default:
UNREACHABLE();
}
@@ -96,16 +111,15 @@ namespace dawn_native { namespace vulkan {
// TODO(cwallez@chromium.org): Batch these updates
device->fn.UpdateDescriptorSets(device->GetVkDevice(), numWrites, writes.data(), 0,
nullptr);
-
- return {};
}
BindGroup::~BindGroup() {
- ToBackend(GetLayout())->Deallocate(&mAllocation);
+ ToBackend(GetLayout())->DeallocateDescriptorSet(&mDescriptorSetAllocation);
+ ToBackend(GetLayout())->DeallocateBindGroup(this);
}
VkDescriptorSet BindGroup::GetHandle() const {
- return mAllocation.set;
+ return mDescriptorSetAllocation.set;
}
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
index 4dd4c218a7c..c912fe2ef27 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
@@ -17,27 +17,30 @@
#include "dawn_native/BindGroup.h"
+#include "common/PlacementAllocated.h"
+#include "common/vulkan_platform.h"
#include "dawn_native/vulkan/BindGroupLayoutVk.h"
namespace dawn_native { namespace vulkan {
class Device;
- class BindGroup : public BindGroupBase {
+ class BindGroup : public BindGroupBase, public PlacementAllocated {
public:
static ResultOrError<BindGroup*> Create(Device* device,
const BindGroupDescriptor* descriptor);
- ~BindGroup();
+
+ BindGroup(Device* device,
+ const BindGroupDescriptor* descriptor,
+ DescriptorSetAllocation descriptorSetAllocation);
+ ~BindGroup() override;
VkDescriptorSet GetHandle() const;
private:
- using BindGroupBase::BindGroupBase;
- MaybeError Initialize();
-
// The descriptor set in this allocation outlives the BindGroup because it is owned by
// the BindGroupLayout which is referenced by the BindGroup.
- DescriptorSetAllocation mAllocation;
+ DescriptorSetAllocation mDescriptorSetAllocation;
};
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
index baa71028c22..4a1af605885 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
@@ -66,7 +66,8 @@ namespace dawn_native { namespace vulkan {
if (usage & (wgpu::BufferUsage::Index | wgpu::BufferUsage::Vertex)) {
flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
}
- if (usage & (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage)) {
+ if (usage &
+ (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage | kReadOnlyStorage)) {
flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
@@ -122,6 +123,16 @@ namespace dawn_native { namespace vulkan {
}
MaybeError Buffer::Initialize() {
+ // Avoid passing ludicrously large sizes to drivers because it causes issues: drivers add
+ // some constants to the size passed and align it, but for values close to the maximum
+ // VkDeviceSize this can cause overflows and makes drivers crash or return bad sizes in the
+        // VkMemoryRequirements. See https://gitlab.khronos.org/vulkan/vulkan/issues/1904
+ // Any size with one of two top bits of VkDeviceSize set is a HUGE allocation and we can
+ // safely return an OOM error.
+ if (GetSize() & (uint64_t(3) << uint64_t(62))) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Buffer size is HUGE and could cause overflows");
+ }
+
VkBufferCreateInfo createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
createInfo.pNext = nullptr;
@@ -136,7 +147,7 @@ namespace dawn_native { namespace vulkan {
Device* device = ToBackend(GetDevice());
DAWN_TRY(CheckVkSuccess(
- device->fn.CreateBuffer(device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ device->fn.CreateBuffer(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
"vkCreateBuffer"));
VkMemoryRequirements requirements;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
index d88512233d3..c2c011040ad 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
@@ -106,7 +106,7 @@ namespace dawn_native { namespace vulkan {
? dynamicOffsets[dirtyIndex].data()
: nullptr;
device->fn.CmdBindDescriptorSets(commands, bindPoint, pipelineLayout, dirtyIndex, 1,
- &set, dynamicOffsetCounts[dirtyIndex],
+ &*set, dynamicOffsetCounts[dirtyIndex],
dynamicOffset);
}
}
@@ -140,15 +140,17 @@ namespace dawn_native { namespace vulkan {
mDynamicOffsetCounts, mDynamicOffsets);
for (uint32_t index : IterateBitSet(mBindGroupLayoutsMask)) {
- for (uint32_t binding : IterateBitSet(mBuffersNeedingBarrier[index])) {
- switch (mBindingTypes[index][binding]) {
+ for (uint32_t bindingIndex : IterateBitSet(mBuffersNeedingBarrier[index])) {
+ switch (mBindingTypes[index][bindingIndex]) {
case wgpu::BindingType::StorageBuffer:
- ToBackend(mBuffers[index][binding])
+ ToBackend(mBuffers[index][bindingIndex])
->TransitionUsageNow(recordingContext,
wgpu::BufferUsage::Storage);
break;
case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
// Not implemented.
case wgpu::BindingType::UniformBuffer:
@@ -179,82 +181,20 @@ namespace dawn_native { namespace vulkan {
for (uint32_t i :
IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- auto& attachmentInfo = renderPass->colorAttachments[i];
- TextureView* view = ToBackend(attachmentInfo.view.Get());
- bool hasResolveTarget = attachmentInfo.resolveTarget.Get() != nullptr;
+ const auto& attachmentInfo = renderPass->colorAttachments[i];
+ bool hasResolveTarget = attachmentInfo.resolveTarget.Get() != nullptr;
wgpu::LoadOp loadOp = attachmentInfo.loadOp;
- ASSERT(view->GetLayerCount() == 1);
- ASSERT(view->GetLevelCount() == 1);
- if (loadOp == wgpu::LoadOp::Load &&
- !view->GetTexture()->IsSubresourceContentInitialized(
- view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1)) {
- loadOp = wgpu::LoadOp::Clear;
- }
-
- if (hasResolveTarget) {
- // We need to set the resolve target to initialized so that it does not get
- // cleared later in the pipeline. The texture will be resolved from the
- // source color attachment, which will be correctly initialized.
- TextureView* resolveView = ToBackend(attachmentInfo.resolveTarget.Get());
- ToBackend(resolveView->GetTexture())
- ->SetIsSubresourceContentInitialized(
- true, resolveView->GetBaseMipLevel(), resolveView->GetLevelCount(),
- resolveView->GetBaseArrayLayer(), resolveView->GetLayerCount());
- }
-
- switch (attachmentInfo.storeOp) {
- case wgpu::StoreOp::Store: {
- view->GetTexture()->SetIsSubresourceContentInitialized(
- true, view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
- } break;
-
- case wgpu::StoreOp::Clear: {
- view->GetTexture()->SetIsSubresourceContentInitialized(
- false, view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
- } break;
-
- default: { UNREACHABLE(); } break;
- }
query.SetColor(i, attachmentInfo.view->GetFormat().format, loadOp,
hasResolveTarget);
}
if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- auto& attachmentInfo = renderPass->depthStencilAttachment;
- TextureView* view = ToBackend(attachmentInfo.view.Get());
+ const auto& attachmentInfo = renderPass->depthStencilAttachment;
- // If the depth stencil texture has not been initialized, we want to use loadop
- // clear to init the contents to 0's
- if (!view->GetTexture()->IsSubresourceContentInitialized(
- view->GetBaseMipLevel(), view->GetLevelCount(),
- view->GetBaseArrayLayer(), view->GetLayerCount())) {
- if (view->GetTexture()->GetFormat().HasDepth() &&
- attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
- attachmentInfo.clearDepth = 0.0f;
- attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
- }
- if (view->GetTexture()->GetFormat().HasStencil() &&
- attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
- attachmentInfo.clearStencil = 0u;
- attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
- }
- }
- query.SetDepthStencil(view->GetTexture()->GetFormat().format,
+ query.SetDepthStencil(attachmentInfo.view->GetTexture()->GetFormat().format,
attachmentInfo.depthLoadOp, attachmentInfo.stencilLoadOp);
-
- if (attachmentInfo.depthStoreOp == wgpu::StoreOp::Store &&
- attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store) {
- view->GetTexture()->SetIsSubresourceContentInitialized(
- true, view->GetBaseMipLevel(), view->GetLevelCount(),
- view->GetBaseArrayLayer(), view->GetLayerCount());
- } else if (attachmentInfo.depthStoreOp == wgpu::StoreOp::Clear &&
- attachmentInfo.stencilStoreOp == wgpu::StoreOp::Clear) {
- view->GetTexture()->SetIsSubresourceContentInitialized(
- false, view->GetBaseMipLevel(), view->GetLevelCount(),
- view->GetBaseArrayLayer(), view->GetLayerCount());
- }
}
query.SetSampleCount(renderPass->attachmentState->GetSampleCount());
@@ -317,14 +257,14 @@ namespace dawn_native { namespace vulkan {
createInfo.flags = 0;
createInfo.renderPass = renderPassVK;
createInfo.attachmentCount = attachmentCount;
- createInfo.pAttachments = attachments.data();
+ createInfo.pAttachments = AsVkArray(attachments.data());
createInfo.width = renderPass->width;
createInfo.height = renderPass->height;
createInfo.layers = 1;
DAWN_TRY(
CheckVkSuccess(device->fn.CreateFramebuffer(device->GetVkDevice(), &createInfo,
- nullptr, &framebuffer),
+ nullptr, &*framebuffer),
"CreateFramebuffer"));
// We don't reuse VkFramebuffers so mark the framebuffer for deletion as soon as the
@@ -463,7 +403,8 @@ namespace dawn_native { namespace vulkan {
VkBuffer srcHandle = srcBuffer->GetHandle();
VkBuffer dstHandle = dstBuffer->GetHandle();
device->fn.CmdCopyBuffer(commands, srcHandle, dstHandle, 1, &region);
- } break;
+ break;
+ }
case Command::CopyBufferToTexture: {
CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
@@ -497,7 +438,8 @@ namespace dawn_native { namespace vulkan {
device->fn.CmdCopyBufferToImage(commands, srcBuffer, dstImage,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
&region);
- } break;
+ break;
+ }
case Command::CopyTextureToBuffer: {
CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
@@ -523,7 +465,8 @@ namespace dawn_native { namespace vulkan {
// The Dawn CopySrc usage is always mapped to GENERAL
device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
dstBuffer, 1, &region);
- } break;
+ break;
+ }
case Command::CopyTextureToTexture: {
CopyTextureToTextureCmd* copy =
@@ -582,16 +525,20 @@ namespace dawn_native { namespace vulkan {
copy->copySize);
}
- } break;
+ break;
+ }
case Command::BeginRenderPass: {
BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
TransitionForPass(recordingContext, passResourceUsages[nextPassNumber]);
+
+ LazyClearRenderPassAttachments(cmd);
DAWN_TRY(RecordRenderPass(recordingContext, cmd));
nextPassNumber++;
- } break;
+ break;
+ }
case Command::BeginComputePass: {
mCommands.NextCommand<BeginComputePassCmd>();
@@ -600,9 +547,13 @@ namespace dawn_native { namespace vulkan {
RecordComputePass(recordingContext);
nextPassNumber++;
- } break;
+ break;
+ }
- default: { UNREACHABLE(); } break;
+ default: {
+ UNREACHABLE();
+ break;
+ }
}
}
@@ -621,14 +572,15 @@ namespace dawn_native { namespace vulkan {
case Command::EndComputePass: {
mCommands.NextCommand<EndComputePassCmd>();
return;
- } break;
+ }
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
device->fn.CmdDispatch(commands, dispatch->x, dispatch->y, dispatch->z);
- } break;
+ break;
+ }
case Command::DispatchIndirect: {
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
@@ -638,7 +590,8 @@ namespace dawn_native { namespace vulkan {
device->fn.CmdDispatchIndirect(
commands, indirectBuffer,
static_cast<VkDeviceSize>(dispatch->indirectOffset));
- } break;
+ break;
+ }
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
@@ -651,7 +604,8 @@ namespace dawn_native { namespace vulkan {
descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
dynamicOffsets);
- } break;
+ break;
+ }
case Command::SetComputePipeline: {
SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
@@ -660,7 +614,8 @@ namespace dawn_native { namespace vulkan {
device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_COMPUTE,
pipeline->GetHandle());
descriptorSets.OnSetPipeline(pipeline);
- } break;
+ break;
+ }
case Command::InsertDebugMarker: {
if (device->GetDeviceInfo().debugMarker) {
@@ -679,7 +634,8 @@ namespace dawn_native { namespace vulkan {
} else {
SkipCommand(&mCommands, Command::InsertDebugMarker);
}
- } break;
+ break;
+ }
case Command::PopDebugGroup: {
if (device->GetDeviceInfo().debugMarker) {
@@ -688,7 +644,8 @@ namespace dawn_native { namespace vulkan {
} else {
SkipCommand(&mCommands, Command::PopDebugGroup);
}
- } break;
+ break;
+ }
case Command::PushDebugGroup: {
if (device->GetDeviceInfo().debugMarker) {
@@ -707,9 +664,13 @@ namespace dawn_native { namespace vulkan {
} else {
SkipCommand(&mCommands, Command::PushDebugGroup);
}
- } break;
+ break;
+ }
- default: { UNREACHABLE(); } break;
+ default: {
+ UNREACHABLE();
+ break;
+ }
}
}
@@ -768,7 +729,8 @@ namespace dawn_native { namespace vulkan {
descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
device->fn.CmdDraw(commands, draw->vertexCount, draw->instanceCount,
draw->firstVertex, draw->firstInstance);
- } break;
+ break;
+ }
case Command::DrawIndexed: {
DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
@@ -777,7 +739,8 @@ namespace dawn_native { namespace vulkan {
device->fn.CmdDrawIndexed(commands, draw->indexCount, draw->instanceCount,
draw->firstIndex, draw->baseVertex,
draw->firstInstance);
- } break;
+ break;
+ }
case Command::DrawIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
@@ -787,7 +750,8 @@ namespace dawn_native { namespace vulkan {
device->fn.CmdDrawIndirect(commands, indirectBuffer,
static_cast<VkDeviceSize>(draw->indirectOffset), 1,
0);
- } break;
+ break;
+ }
case Command::DrawIndexedIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
@@ -797,7 +761,8 @@ namespace dawn_native { namespace vulkan {
device->fn.CmdDrawIndexedIndirect(
commands, indirectBuffer, static_cast<VkDeviceSize>(draw->indirectOffset),
1, 0);
- } break;
+ break;
+ }
case Command::InsertDebugMarker: {
if (device->GetDeviceInfo().debugMarker) {
@@ -816,7 +781,8 @@ namespace dawn_native { namespace vulkan {
} else {
SkipCommand(iter, Command::InsertDebugMarker);
}
- } break;
+ break;
+ }
case Command::PopDebugGroup: {
if (device->GetDeviceInfo().debugMarker) {
@@ -825,7 +791,8 @@ namespace dawn_native { namespace vulkan {
} else {
SkipCommand(iter, Command::PopDebugGroup);
}
- } break;
+ break;
+ }
case Command::PushDebugGroup: {
if (device->GetDeviceInfo().debugMarker) {
@@ -844,7 +811,8 @@ namespace dawn_native { namespace vulkan {
} else {
SkipCommand(iter, Command::PushDebugGroup);
}
- } break;
+ break;
+ }
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
@@ -856,7 +824,8 @@ namespace dawn_native { namespace vulkan {
descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
dynamicOffsets);
- } break;
+ break;
+ }
case Command::SetIndexBuffer: {
SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
@@ -869,7 +838,8 @@ namespace dawn_native { namespace vulkan {
VulkanIndexType(lastPipeline->GetVertexStateDescriptor()->indexFormat);
device->fn.CmdBindIndexBuffer(
commands, indexBuffer, static_cast<VkDeviceSize>(cmd->offset), indexType);
- } break;
+ break;
+ }
case Command::SetRenderPipeline: {
SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
@@ -880,15 +850,17 @@ namespace dawn_native { namespace vulkan {
lastPipeline = pipeline;
descriptorSets.OnSetPipeline(pipeline);
- } break;
+ break;
+ }
case Command::SetVertexBuffer: {
SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
VkBuffer buffer = ToBackend(cmd->buffer)->GetHandle();
VkDeviceSize offset = static_cast<VkDeviceSize>(cmd->offset);
- device->fn.CmdBindVertexBuffers(commands, cmd->slot, 1, &buffer, &offset);
- } break;
+ device->fn.CmdBindVertexBuffers(commands, cmd->slot, 1, &*buffer, &offset);
+ break;
+ }
default:
UNREACHABLE();
@@ -903,7 +875,7 @@ namespace dawn_native { namespace vulkan {
mCommands.NextCommand<EndRenderPassCmd>();
device->fn.CmdEndRenderPass(commands);
return {};
- } break;
+ }
case Command::SetBlendColor: {
SetBlendColorCmd* cmd = mCommands.NextCommand<SetBlendColorCmd>();
@@ -914,13 +886,15 @@ namespace dawn_native { namespace vulkan {
cmd->color.a,
};
device->fn.CmdSetBlendConstants(commands, blendConstants);
- } break;
+ break;
+ }
case Command::SetStencilReference: {
SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK,
cmd->reference);
- } break;
+ break;
+ }
case Command::SetViewport: {
SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
@@ -933,7 +907,8 @@ namespace dawn_native { namespace vulkan {
viewport.maxDepth = cmd->maxDepth;
device->fn.CmdSetViewport(commands, 0, 1, &viewport);
- } break;
+ break;
+ }
case Command::SetScissorRect: {
SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
@@ -944,7 +919,8 @@ namespace dawn_native { namespace vulkan {
rect.extent.height = cmd->height;
device->fn.CmdSetScissor(commands, 0, 1, &rect);
- } break;
+ break;
+ }
case Command::ExecuteBundles: {
ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
@@ -957,9 +933,13 @@ namespace dawn_native { namespace vulkan {
EncodeRenderBundleCommand(iter, type);
}
}
- } break;
+ break;
+ }
- default: { EncodeRenderBundleCommand(&mCommands, type); } break;
+ default: {
+ EncodeRenderBundleCommand(&mCommands, type);
+ break;
+ }
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
index 2f37620ded2..16dd8e7d153 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ComputePipelineVk.cpp
@@ -38,7 +38,7 @@ namespace dawn_native { namespace vulkan {
createInfo.pNext = nullptr;
createInfo.flags = 0;
createInfo.layout = ToBackend(descriptor->layout)->GetHandle();
- createInfo.basePipelineHandle = VK_NULL_HANDLE;
+ createInfo.basePipelineHandle = ::VK_NULL_HANDLE;
createInfo.basePipelineIndex = -1;
createInfo.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
@@ -51,8 +51,8 @@ namespace dawn_native { namespace vulkan {
Device* device = ToBackend(GetDevice());
return CheckVkSuccess(
- device->fn.CreateComputePipelines(device->GetVkDevice(), VK_NULL_HANDLE, 1, &createInfo,
- nullptr, &mHandle),
+ device->fn.CreateComputePipelines(device->GetVkDevice(), ::VK_NULL_HANDLE, 1,
+ &createInfo, nullptr, &*mHandle),
"CreateComputePipeline");
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
index a5f7788473b..e239a64d81e 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
@@ -50,6 +50,9 @@ namespace dawn_native { namespace vulkan {
if (descriptor != nullptr) {
ApplyToggleOverrides(descriptor);
}
+
+ // Set the device as lost until successfully created.
+ mLossStatus = LossStatus::AlreadyLost;
}
MaybeError Device::Initialize() {
@@ -79,77 +82,38 @@ namespace dawn_native { namespace vulkan {
DAWN_TRY(PrepareRecordingContext());
+ // The environment can request to use D32S8 or D24S8 when it's not available. Override
+ // the decision if it is not applicable.
+ ApplyDepth24PlusS8Toggle();
+
return {};
}
Device::~Device() {
- // Immediately tag the recording context as unused so we don't try to submit it in Tick.
- mRecordingContext.used = false;
- fn.DestroyCommandPool(mVkDevice, mRecordingContext.commandPool, nullptr);
-
- VkResult waitIdleResult = fn.QueueWaitIdle(mQueue);
- // Ignore the result of QueueWaitIdle: it can return OOM which we can't really do anything
- // about, Device lost, which means workloads running on the GPU are no longer accessible
- // (so they are as good as waited on) or success.
- DAWN_UNUSED(waitIdleResult);
-
- CheckPassedFences();
-
- // Make sure all fences are complete by explicitly waiting on them all
- while (!mFencesInFlight.empty()) {
- VkFence fence = mFencesInFlight.front().first;
- Serial fenceSerial = mFencesInFlight.front().second;
- ASSERT(fenceSerial > mCompletedSerial);
+ BaseDestructor();
- VkResult result = VK_TIMEOUT;
- do {
- result = fn.WaitForFences(mVkDevice, 1, &fence, true, UINT64_MAX);
- } while (result == VK_TIMEOUT);
- fn.DestroyFence(mVkDevice, fence, nullptr);
-
- mFencesInFlight.pop();
- mCompletedSerial = fenceSerial;
- }
-
- // Some operations might have been started since the last submit and waiting
- // on a serial that doesn't have a corresponding fence enqueued. Force all
- // operations to look as if they were completed (because they were).
- mCompletedSerial = mLastSubmittedSerial + 1;
- Tick();
-
- ASSERT(mCommandsInFlight.Empty());
- for (const CommandPoolAndBuffer& commands : mUnusedCommands) {
- fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
- }
- mUnusedCommands.clear();
-
- // TODO(jiajie.hu@intel.com): In rare cases, a DAWN_TRY() failure may leave semaphores
- // untagged for deletion. But for most of the time when everything goes well, these
- // assertions can be helpful in catching bugs.
- ASSERT(mRecordingContext.waitSemaphores.empty());
- ASSERT(mRecordingContext.signalSemaphores.empty());
-
- for (VkFence fence : mUnusedFences) {
- fn.DestroyFence(mVkDevice, fence, nullptr);
- }
- mUnusedFences.clear();
-
- // Free services explicitly so that they can free Vulkan objects before vkDestroyDevice
- mDynamicUploader = nullptr;
mDescriptorSetService = nullptr;
- // Releasing the uploader enqueues buffers to be released.
- // Call Tick() again to clear them before releasing the deleter.
- mDeleter->Tick(mCompletedSerial);
-
- mDeleter = nullptr;
- mMapRequestTracker = nullptr;
+ // The frontend asserts DynamicUploader is destructed by the backend.
+ // It is usually destructed in Destroy(), but Destroy isn't always called if device
+ // initialization failed.
+ mDynamicUploader = nullptr;
- // The VkRenderPasses in the cache can be destroyed immediately since all commands referring
- // to them are guaranteed to be finished executing.
- mRenderPassCache = nullptr;
+ // We still need to properly handle Vulkan object deletion even if the device has been lost,
+ // so the Deleter and vkDevice cannot be destroyed in Device::Destroy().
+ // We need handle deleting all child objects by calling Tick() again with a large serial to
+ // force all operations to look as if they were completed, and delete all objects before
+ // destroying the Deleter and vkDevice.
+ // The Deleter may be null if initialization failed.
+ if (mDeleter != nullptr) {
+ mCompletedSerial = std::numeric_limits<Serial>::max();
+ mDeleter->Tick(mCompletedSerial);
+ mDeleter = nullptr;
+ }
// VkQueues are destroyed when the VkDevice is destroyed
+ // The VkDevice is needed to destroy child objects, so it must be destroyed last after all
+ // child objects have been deleted.
if (mVkDevice != VK_NULL_HANDLE) {
fn.DestroyDevice(mVkDevice, nullptr);
mVkDevice = VK_NULL_HANDLE;
@@ -197,6 +161,12 @@ namespace dawn_native { namespace vulkan {
const SwapChainDescriptor* descriptor) {
return SwapChain::Create(this, descriptor);
}
+ ResultOrError<NewSwapChainBase*> Device::CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) {
+ return DAWN_VALIDATION_ERROR("New swapchains not implemented.");
+ }
ResultOrError<TextureBase*> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
return Texture::Create(this, descriptor);
}
@@ -302,13 +272,13 @@ namespace dawn_native { namespace vulkan {
submitInfo.pNext = nullptr;
submitInfo.waitSemaphoreCount =
static_cast<uint32_t>(mRecordingContext.waitSemaphores.size());
- submitInfo.pWaitSemaphores = mRecordingContext.waitSemaphores.data();
+ submitInfo.pWaitSemaphores = AsVkArray(mRecordingContext.waitSemaphores.data());
submitInfo.pWaitDstStageMask = dstStageMasks.data();
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &mRecordingContext.commandBuffer;
submitInfo.signalSemaphoreCount =
static_cast<uint32_t>(mRecordingContext.signalSemaphores.size());
- submitInfo.pSignalSemaphores = mRecordingContext.signalSemaphores.data();
+ submitInfo.pSignalSemaphores = AsVkArray(mRecordingContext.signalSemaphores.data());
VkFence fence = VK_NULL_HANDLE;
DAWN_TRY_ASSIGN(fence, GetUnusedFence());
@@ -415,7 +385,7 @@ namespace dawn_native { namespace vulkan {
}
if (universalQueueFamily == -1) {
- return DAWN_DEVICE_LOST_ERROR("No universal queue family");
+ return DAWN_INTERNAL_ERROR("No universal queue family");
}
mQueueFamily = static_cast<uint32_t>(universalQueueFamily);
}
@@ -448,6 +418,8 @@ namespace dawn_native { namespace vulkan {
DAWN_TRY(CheckVkSuccess(fn.CreateDevice(physicalDevice, &createInfo, nullptr, &mVkDevice),
"vkCreateDevice"));
+ // Device created. Mark it as alive.
+ mLossStatus = LossStatus::Alive;
return usedKnobs;
}
@@ -459,6 +431,40 @@ namespace dawn_native { namespace vulkan {
// TODO(jiawei.shao@intel.com): tighten this workaround when this issue is fixed in both
// Vulkan SPEC and drivers.
SetToggle(Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy, true);
+
+ // By default try to use D32S8 for Depth24PlusStencil8
+ SetToggle(Toggle::VulkanUseD32S8, true);
+ }
+
+ void Device::ApplyDepth24PlusS8Toggle() {
+ VkPhysicalDevice physicalDevice = ToBackend(GetAdapter())->GetPhysicalDevice();
+
+ bool supportsD32s8 = false;
+ {
+ VkFormatProperties properties;
+ fn.GetPhysicalDeviceFormatProperties(physicalDevice, VK_FORMAT_D32_SFLOAT_S8_UINT,
+ &properties);
+ supportsD32s8 =
+ properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ }
+
+ bool supportsD24s8 = false;
+ {
+ VkFormatProperties properties;
+ fn.GetPhysicalDeviceFormatProperties(physicalDevice, VK_FORMAT_D24_UNORM_S8_UINT,
+ &properties);
+ supportsD24s8 =
+ properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ }
+
+ ASSERT(supportsD32s8 || supportsD24s8);
+
+ if (!supportsD24s8) {
+ SetToggle(Toggle::VulkanUseD32S8, true);
+ }
+ if (!supportsD32s8) {
+ SetToggle(Toggle::VulkanUseD32S8, false);
+ }
}
VulkanFunctions* Device::GetMutableFunctions() {
@@ -468,7 +474,7 @@ namespace dawn_native { namespace vulkan {
ResultOrError<VkFence> Device::GetUnusedFence() {
if (!mUnusedFences.empty()) {
VkFence fence = mUnusedFences.back();
- DAWN_TRY(CheckVkSuccess(fn.ResetFences(mVkDevice, 1, &fence), "vkResetFences"));
+ DAWN_TRY(CheckVkSuccess(fn.ResetFences(mVkDevice, 1, &*fence), "vkResetFences"));
mUnusedFences.pop_back();
return fence;
@@ -480,7 +486,7 @@ namespace dawn_native { namespace vulkan {
createInfo.flags = 0;
VkFence fence = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(fn.CreateFence(mVkDevice, &createInfo, nullptr, &fence),
+ DAWN_TRY(CheckVkSuccess(fn.CreateFence(mVkDevice, &createInfo, nullptr, &*fence),
"vkCreateFence"));
return fence;
@@ -491,7 +497,9 @@ namespace dawn_native { namespace vulkan {
VkFence fence = mFencesInFlight.front().first;
Serial fenceSerial = mFencesInFlight.front().second;
- VkResult result = fn.GetFenceStatus(mVkDevice, fence);
+ VkResult result = VkResult::WrapUnsafe(
+ INJECT_ERROR_OR_RUN(fn.GetFenceStatus(mVkDevice, fence), VK_ERROR_DEVICE_LOST));
+ // TODO: Handle DeviceLost error.
ASSERT(result == VK_SUCCESS || result == VK_NOT_READY);
// Fence are added in order, so we can stop searching as soon
@@ -531,7 +539,7 @@ namespace dawn_native { namespace vulkan {
createInfo.queueFamilyIndex = mQueueFamily;
DAWN_TRY(CheckVkSuccess(fn.CreateCommandPool(mVkDevice, &createInfo, nullptr,
- &mRecordingContext.commandPool),
+ &*mRecordingContext.commandPool),
"vkCreateCommandPool"));
VkCommandBufferAllocateInfo allocateInfo;
@@ -616,7 +624,7 @@ namespace dawn_native { namespace vulkan {
return DAWN_VALIDATION_ERROR("External semaphore usage not supported");
}
if (!mExternalMemoryService->SupportsImportMemory(
- VulkanImageFormat(textureDescriptor->format), VK_IMAGE_TYPE_2D,
+ VulkanImageFormat(this, textureDescriptor->format), VK_IMAGE_TYPE_2D,
VK_IMAGE_TILING_OPTIMAL,
VulkanImageUsage(textureDescriptor->usage,
GetValidInternalFormat(textureDescriptor->format)),
@@ -730,4 +738,86 @@ namespace dawn_native { namespace vulkan {
return mResourceMemoryAllocator.get();
}
+ MaybeError Device::WaitForIdleForDestruction() {
+ VkResult waitIdleResult = VkResult::WrapUnsafe(fn.QueueWaitIdle(mQueue));
+ // Ignore the result of QueueWaitIdle: it can return OOM which we can't really do anything
+ // about, Device lost, which means workloads running on the GPU are no longer accessible
+ // (so they are as good as waited on) or success.
+ DAWN_UNUSED(waitIdleResult);
+
+ CheckPassedFences();
+
+ // Make sure all fences are complete by explicitly waiting on them all
+ while (!mFencesInFlight.empty()) {
+ VkFence fence = mFencesInFlight.front().first;
+ Serial fenceSerial = mFencesInFlight.front().second;
+ ASSERT(fenceSerial > mCompletedSerial);
+
+ VkResult result = VkResult::WrapUnsafe(VK_TIMEOUT);
+ do {
+ result = VkResult::WrapUnsafe(
+ INJECT_ERROR_OR_RUN(fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX),
+ VK_ERROR_DEVICE_LOST));
+ } while (result == VK_TIMEOUT);
+
+ // TODO: Handle errors
+ ASSERT(result == VK_SUCCESS);
+ fn.DestroyFence(mVkDevice, fence, nullptr);
+
+ mFencesInFlight.pop();
+ mCompletedSerial = fenceSerial;
+ }
+ return {};
+ }
+
+ void Device::Destroy() {
+ ASSERT(mLossStatus != LossStatus::AlreadyLost);
+
+ // Immediately tag the recording context as unused so we don't try to submit it in Tick.
+ mRecordingContext.used = false;
+ fn.DestroyCommandPool(mVkDevice, mRecordingContext.commandPool, nullptr);
+
+ for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
+ fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
+ }
+ mRecordingContext.waitSemaphores.clear();
+
+ for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
+ fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
+ }
+ mRecordingContext.signalSemaphores.clear();
+
+ // Some operations might have been started since the last submit and waiting
+ // on a serial that doesn't have a corresponding fence enqueued. Force all
+ // operations to look as if they were completed (because they were).
+ mCompletedSerial = mLastSubmittedSerial + 1;
+
+ // Assert that errors are device loss so that we can continue with destruction
+ AssertAndIgnoreDeviceLossError(TickImpl());
+
+ ASSERT(mCommandsInFlight.Empty());
+ for (const CommandPoolAndBuffer& commands : mUnusedCommands) {
+ fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
+ }
+ mUnusedCommands.clear();
+
+ for (VkFence fence : mUnusedFences) {
+ fn.DestroyFence(mVkDevice, fence, nullptr);
+ }
+ mUnusedFences.clear();
+
+ // Free services explicitly so that they can free Vulkan objects before vkDestroyDevice
+ mDynamicUploader = nullptr;
+
+ // Releasing the uploader enqueues buffers to be released.
+ // Call Tick() again to clear them before releasing the deleter.
+ mDeleter->Tick(mCompletedSerial);
+
+ mMapRequestTracker = nullptr;
+
+ // The VkRenderPasses in the cache can be destroyed immediately since all commands referring
+ // to them are guaranteed to be finished executing.
+ mRenderPassCache = nullptr;
+ }
+
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
index e5210d6b9f5..a4445f26f72 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
@@ -36,7 +36,6 @@ namespace dawn_native { namespace vulkan {
class Adapter;
class BufferUploader;
class DescriptorSetService;
- struct ExternalImageDescriptor;
class FencedDeleter;
class MapRequestTracker;
class RenderPassCache;
@@ -68,6 +67,8 @@ namespace dawn_native { namespace vulkan {
Serial GetPendingCommandSerial() const override;
MaybeError SubmitPendingCommands();
+ // Dawn Native API
+
TextureBase* CreateTextureWrappingVulkanImage(
const ExternalImageDescriptor* descriptor,
ExternalMemoryHandle memoryHandle,
@@ -117,6 +118,10 @@ namespace dawn_native { namespace vulkan {
const ShaderModuleDescriptor* descriptor) override;
ResultOrError<SwapChainBase*> CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) override;
+ ResultOrError<NewSwapChainBase*> CreateSwapChainImpl(
+ Surface* surface,
+ NewSwapChainBase* previousSwapChain,
+ const SwapChainDescriptor* descriptor) override;
ResultOrError<TextureBase*> CreateTextureImpl(const TextureDescriptor* descriptor) override;
ResultOrError<TextureViewBase*> CreateTextureViewImpl(
TextureBase* texture,
@@ -126,6 +131,10 @@ namespace dawn_native { namespace vulkan {
void GatherQueueFromDevice();
void InitTogglesFromDriver();
+ void ApplyDepth24PlusS8Toggle();
+
+ void Destroy() override;
+ MaybeError WaitForIdleForDestruction() override;
// To make it easier to use fn it is a public const member. However
// the Device is allowed to mutate them through these private methods.
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp
index e359d7033fb..11bf0899ebb 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp
@@ -66,6 +66,9 @@ namespace dawn_native { namespace vulkan {
NativeSwapChainImpl::NativeSwapChainImpl(Device* device, VkSurfaceKHR surface)
: mSurface(surface), mDevice(device) {
+ // Call this immediately, so that BackendBinding::GetPreferredSwapChainTextureFormat
+ // will return a correct result before a SwapChain is created.
+ UpdateSurfaceConfig();
}
NativeSwapChainImpl::~NativeSwapChainImpl() {
@@ -80,8 +83,8 @@ namespace dawn_native { namespace vulkan {
}
void NativeSwapChainImpl::UpdateSurfaceConfig() {
- if (mDevice->ConsumedError(
- GatherSurfaceInfo(*ToBackend(mDevice->GetAdapter()), mSurface, &mInfo))) {
+ if (mDevice->ConsumedError(GatherSurfaceInfo(*ToBackend(mDevice->GetAdapter()), mSurface),
+ &mInfo)) {
ASSERT(false);
}
@@ -133,7 +136,7 @@ namespace dawn_native { namespace vulkan {
createInfo.oldSwapchain = oldSwapchain;
if (mDevice->fn.CreateSwapchainKHR(mDevice->GetVkDevice(), &createInfo, nullptr,
- &mSwapChain) != VK_SUCCESS) {
+ &*mSwapChain) != VK_SUCCESS) {
ASSERT(false);
}
@@ -148,7 +151,7 @@ namespace dawn_native { namespace vulkan {
ASSERT(count >= mConfig.minImageCount);
mSwapChainImages.resize(count);
if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
- mSwapChainImages.data()) != VK_SUCCESS) {
+ AsVkArray(mSwapChainImages.data())) != VK_SUCCESS) {
ASSERT(false);
}
@@ -165,7 +168,7 @@ namespace dawn_native { namespace vulkan {
barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
barrier.srcQueueFamilyIndex = 0;
barrier.dstQueueFamilyIndex = 0;
- barrier.image = image;
+ barrier.image = *image;
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.baseMipLevel = 0;
barrier.subresourceRange.levelCount = 1;
@@ -194,18 +197,22 @@ namespace dawn_native { namespace vulkan {
createInfo.pNext = nullptr;
createInfo.flags = 0;
if (mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &createInfo, nullptr,
- &semaphore) != VK_SUCCESS) {
+ &*semaphore) != VK_SUCCESS) {
ASSERT(false);
}
}
if (mDevice->fn.AcquireNextImageKHR(mDevice->GetVkDevice(), mSwapChain,
std::numeric_limits<uint64_t>::max(), semaphore,
- VK_NULL_HANDLE, &mLastImageIndex) != VK_SUCCESS) {
+ VkFence{}, &mLastImageIndex) != VK_SUCCESS) {
ASSERT(false);
}
- nextTexture->texture.u64 = mSwapChainImages[mLastImageIndex].GetU64();
+ nextTexture->texture.u64 =
+#if defined(DAWN_PLATFORM_64_BIT)
+ reinterpret_cast<uint64_t>
+#endif
+ (*mSwapChainImages[mLastImageIndex]);
mDevice->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
return DAWN_SWAP_CHAIN_NO_ERROR;
@@ -224,7 +231,7 @@ namespace dawn_native { namespace vulkan {
presentInfo.waitSemaphoreCount = 0;
presentInfo.pWaitSemaphores = nullptr;
presentInfo.swapchainCount = 1;
- presentInfo.pSwapchains = &mSwapChain;
+ presentInfo.pSwapchains = &*mSwapChain;
presentInfo.pImageIndices = &mLastImageIndex;
presentInfo.pResults = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp
index dd123af35ed..847ba60f82a 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/PipelineLayoutVk.cpp
@@ -48,13 +48,13 @@ namespace dawn_native { namespace vulkan {
createInfo.pNext = nullptr;
createInfo.flags = 0;
createInfo.setLayoutCount = numSetLayouts;
- createInfo.pSetLayouts = setLayouts.data();
+ createInfo.pSetLayouts = AsVkArray(setLayouts.data());
createInfo.pushConstantRangeCount = 0;
createInfo.pPushConstantRanges = nullptr;
Device* device = ToBackend(GetDevice());
return CheckVkSuccess(
- device->fn.CreatePipelineLayout(device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ device->fn.CreatePipelineLayout(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
"CreatePipelineLayout");
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp
index 1f3f940379a..47330f12045 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp
@@ -108,7 +108,7 @@ namespace dawn_native { namespace vulkan {
attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachmentDesc.flags = 0;
- attachmentDesc.format = VulkanImageFormat(query.colorFormats[i]);
+ attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
attachmentDesc.samples = vkSampleCount;
attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.colorLoadOp[i]);
attachmentDesc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
@@ -129,7 +129,7 @@ namespace dawn_native { namespace vulkan {
depthStencilAttachmentRef.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
attachmentDesc.flags = 0;
- attachmentDesc.format = VulkanImageFormat(query.depthStencilFormat);
+ attachmentDesc.format = VulkanImageFormat(mDevice, query.depthStencilFormat);
attachmentDesc.samples = vkSampleCount;
attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.depthLoadOp);
attachmentDesc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
@@ -150,7 +150,7 @@ namespace dawn_native { namespace vulkan {
attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachmentDesc.flags = 0;
- attachmentDesc.format = VulkanImageFormat(query.colorFormats[i]);
+ attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
attachmentDesc.samples = VK_SAMPLE_COUNT_1_BIT;
attachmentDesc.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachmentDesc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
@@ -191,9 +191,9 @@ namespace dawn_native { namespace vulkan {
// Create the render pass from the zillion parameters
VkRenderPass renderPass;
- DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateRenderPass(mDevice->GetVkDevice(), &createInfo, nullptr, &renderPass),
- "CreateRenderPass"));
+ DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateRenderPass(mDevice->GetVkDevice(), &createInfo,
+ nullptr, &*renderPass),
+ "CreateRenderPass"));
return renderPass;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
index 4b770a518aa..88447fb73e7 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
@@ -144,6 +144,8 @@ namespace dawn_native { namespace vulkan {
return VK_FRONT_FACE_COUNTER_CLOCKWISE;
case wgpu::FrontFace::CW:
return VK_FRONT_FACE_CLOCKWISE;
+ default:
+ UNREACHABLE();
}
}
@@ -155,6 +157,8 @@ namespace dawn_native { namespace vulkan {
return VK_CULL_MODE_FRONT_BIT;
case wgpu::CullMode::Back:
return VK_CULL_MODE_BACK_BIT;
+ default:
+ UNREACHABLE();
}
}
@@ -495,12 +499,12 @@ namespace dawn_native { namespace vulkan {
createInfo.layout = ToBackend(GetLayout())->GetHandle();
createInfo.renderPass = renderPass;
createInfo.subpass = 0;
- createInfo.basePipelineHandle = VK_NULL_HANDLE;
+ createInfo.basePipelineHandle = VkPipeline{};
createInfo.basePipelineIndex = -1;
return CheckVkSuccess(
- device->fn.CreateGraphicsPipelines(device->GetVkDevice(), VK_NULL_HANDLE, 1,
- &createInfo, nullptr, &mHandle),
+ device->fn.CreateGraphicsPipelines(device->GetVkDevice(), VkPipelineCache{}, 1,
+ &createInfo, nullptr, &*mHandle),
"CreateGraphicsPipeline");
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp
index 58dcd666d6c..3c2ae563d6c 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp
@@ -69,15 +69,12 @@ namespace dawn_native { namespace vulkan {
allocateInfo.memoryTypeIndex = mMemoryTypeIndex;
VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
- VkResult allocationResult = mDevice->fn.AllocateMemory(
- mDevice->GetVkDevice(), &allocateInfo, nullptr, &allocatedMemory);
- // Handle vkAllocateMemory error but differentiate OOM that we want to surface to
- // the application.
- if (allocationResult == VK_ERROR_OUT_OF_DEVICE_MEMORY) {
- return DAWN_OUT_OF_MEMORY_ERROR("OOM while creating the Vkmemory");
- }
- DAWN_TRY(CheckVkSuccess(allocationResult, "vkAllocateMemory"));
+ // First check OOM that we want to surface to the application.
+ DAWN_TRY(CheckVkOOMThenSuccess(
+ mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo, nullptr,
+ &*allocatedMemory),
+ "vkAllocateMemory"));
ASSERT(allocatedMemory != VK_NULL_HANDLE);
return {std::make_unique<ResourceHeap>(allocatedMemory, mMemoryTypeIndex)};
@@ -115,31 +112,35 @@ namespace dawn_native { namespace vulkan {
VkDeviceSize size = requirements.size;
- // If the resource is too big, allocate memory just for it.
- // Also allocate mappable resources separately because at the moment the mapped pointer
+ // Sub-allocate non-mappable resources because at the moment the mapped pointer
// is part of the resource and not the heap, which doesn't match the Vulkan model.
// TODO(cwallez@chromium.org): allow sub-allocating mappable resources, maybe.
- if (requirements.size >= kMaxSizeForSubAllocation || mappable) {
- std::unique_ptr<ResourceHeapBase> resourceHeap;
- DAWN_TRY_ASSIGN(resourceHeap,
- mAllocatorsPerType[memoryType]->AllocateResourceHeap(size));
-
- void* mappedPointer = nullptr;
- if (mappable) {
- DAWN_TRY(
- CheckVkSuccess(mDevice->fn.MapMemory(mDevice->GetVkDevice(),
- ToBackend(resourceHeap.get())->GetMemory(),
- 0, size, 0, &mappedPointer),
- "vkMapMemory"));
+ if (requirements.size < kMaxSizeForSubAllocation && !mappable) {
+ ResourceMemoryAllocation subAllocation;
+ DAWN_TRY_ASSIGN(subAllocation,
+ mAllocatorsPerType[memoryType]->AllocateMemory(requirements));
+ if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+ return subAllocation;
}
+ }
- AllocationInfo info;
- info.mMethod = AllocationMethod::kDirect;
- return ResourceMemoryAllocation(info, /*offset*/ 0, resourceHeap.release(),
- static_cast<uint8_t*>(mappedPointer));
- } else {
- return mAllocatorsPerType[memoryType]->AllocateMemory(requirements);
+ // If sub-allocation failed, allocate memory just for it.
+ std::unique_ptr<ResourceHeapBase> resourceHeap;
+ DAWN_TRY_ASSIGN(resourceHeap, mAllocatorsPerType[memoryType]->AllocateResourceHeap(size));
+
+ void* mappedPointer = nullptr;
+ if (mappable) {
+ DAWN_TRY(
+ CheckVkSuccess(mDevice->fn.MapMemory(mDevice->GetVkDevice(),
+ ToBackend(resourceHeap.get())->GetMemory(), 0,
+ size, 0, &mappedPointer),
+ "vkMapMemory"));
}
+
+ AllocationInfo info;
+ info.mMethod = AllocationMethod::kDirect;
+ return ResourceMemoryAllocation(info, /*offset*/ 0, resourceHeap.release(),
+ static_cast<uint8_t*>(mappedPointer));
}
void ResourceMemoryAllocator::Deallocate(ResourceMemoryAllocation* allocation) {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
index 05baf71fc92..67d70f8d7c2 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
@@ -87,7 +87,7 @@ namespace dawn_native { namespace vulkan {
Device* device = ToBackend(GetDevice());
return CheckVkSuccess(
- device->fn.CreateSampler(device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ device->fn.CreateSampler(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
"CreateSampler");
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
index 96e31270d5d..60c6ba6e19a 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
@@ -40,31 +40,42 @@ namespace dawn_native { namespace vulkan {
// Use SPIRV-Cross to extract info from the SPIRV even if Vulkan consumes SPIRV. We want to
// have a translation step eventually anyway.
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
- shaderc_spvc::CompileOptions options;
- shaderc_spvc_status status =
- mSpvcContext.InitializeForGlsl(descriptor->code, descriptor->codeSize, options);
- if (status != shaderc_spvc_status_success) {
- return DAWN_VALIDATION_ERROR("Unable to initialize instance of spvc");
- }
+ shaderc_spvc::CompileOptions options = GetCompileOptions();
- spirv_cross::Compiler* compiler =
- reinterpret_cast<spirv_cross::Compiler*>(mSpvcContext.GetCompiler());
- ExtractSpirvInfo(*compiler);
+ DAWN_TRY(CheckSpvcSuccess(
+ mSpvcContext.InitializeForVulkan(descriptor->code, descriptor->codeSize, options),
+ "Unable to initialize instance of spvc"));
+
+ spirv_cross::Compiler* compiler;
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.GetCompiler(reinterpret_cast<void**>(&compiler)),
+ "Unable to get cross compiler"));
+ DAWN_TRY(ExtractSpirvInfo(*compiler));
} else {
spirv_cross::Compiler compiler(descriptor->code, descriptor->codeSize);
- ExtractSpirvInfo(compiler);
+ DAWN_TRY(ExtractSpirvInfo(compiler));
}
VkShaderModuleCreateInfo createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
createInfo.pNext = nullptr;
createInfo.flags = 0;
- createInfo.codeSize = descriptor->codeSize * sizeof(uint32_t);
- createInfo.pCode = descriptor->code;
+ std::vector<uint32_t> vulkanSource;
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ shaderc_spvc::CompilationResult result;
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.CompileShader(&result),
+ "Unable to generate Vulkan shader"));
+ DAWN_TRY(CheckSpvcSuccess(result.GetBinaryOutput(&vulkanSource),
+ "Unable to get binary output of Vulkan shader"));
+ createInfo.codeSize = vulkanSource.size() * sizeof(uint32_t);
+ createInfo.pCode = vulkanSource.data();
+ } else {
+ createInfo.codeSize = descriptor->codeSize * sizeof(uint32_t);
+ createInfo.pCode = descriptor->code;
+ }
Device* device = ToBackend(GetDevice());
return CheckVkSuccess(
- device->fn.CreateShaderModule(device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ device->fn.CreateShaderModule(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
"CreateShaderModule");
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp
index 42623188e3e..dfdb9786d7b 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp
@@ -36,7 +36,7 @@ namespace dawn_native { namespace vulkan {
createInfo.pQueueFamilyIndices = 0;
DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateBuffer(mDevice->GetVkDevice(), &createInfo, nullptr, &mBuffer),
+ mDevice->fn.CreateBuffer(mDevice->GetVkDevice(), &createInfo, nullptr, &*mBuffer),
"vkCreateBuffer"));
VkMemoryRequirements requirements;
@@ -52,7 +52,7 @@ namespace dawn_native { namespace vulkan {
mMappedPointer = mAllocation.GetMappedPointer();
if (mMappedPointer == nullptr) {
- return DAWN_DEVICE_LOST_ERROR("Unable to map staging buffer.");
+ return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
}
return {};
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
index 52a1f728f09..44ec020b5b7 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
@@ -25,7 +25,7 @@ namespace dawn_native { namespace vulkan {
}
SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
- : SwapChainBase(device, descriptor) {
+ : OldSwapChainBase(device, descriptor) {
const auto& im = GetImplementation();
DawnWSIContextVulkan wsiContext = {};
im.Init(im.userData, &wsiContext);
@@ -43,11 +43,12 @@ namespace dawn_native { namespace vulkan {
DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
if (error) {
- GetDevice()->HandleError(wgpu::ErrorType::Unknown, error);
+ GetDevice()->HandleError(InternalErrorType::Internal, error);
return nullptr;
}
- VkImage nativeTexture = VkImage::CreateFromU64(next.texture.u64);
+ VkImage nativeTexture =
+ VkImage::CreateFromHandle(reinterpret_cast<::VkImage>(next.texture.u64));
return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
index 1d8ce43ac72..f878996e31f 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
@@ -23,7 +23,7 @@ namespace dawn_native { namespace vulkan {
class Device;
- class SwapChain : public SwapChainBase {
+ class SwapChain : public OldSwapChainBase {
public:
static SwapChain* Create(Device* device, const SwapChainDescriptor* descriptor);
~SwapChain();
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
index 5d119764550..90fae81909d 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
@@ -203,7 +203,7 @@ namespace dawn_native { namespace vulkan {
} // namespace
// Converts Dawn texture format to Vulkan formats.
- VkFormat VulkanImageFormat(wgpu::TextureFormat format) {
+ VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format) {
switch (format) {
case wgpu::TextureFormat::R8Unorm:
return VK_FORMAT_R8_UNORM;
@@ -285,7 +285,15 @@ namespace dawn_native { namespace vulkan {
case wgpu::TextureFormat::Depth24Plus:
return VK_FORMAT_D32_SFLOAT;
case wgpu::TextureFormat::Depth24PlusStencil8:
- return VK_FORMAT_D32_SFLOAT_S8_UINT;
+ // Depth24PlusStencil8 maps to either of these two formats because only requires
+ // that one of the two be present. The VulkanUseD32S8 toggle combines the wish of
+ // the environment, default to using D32S8, and availability information so we know
+ // that the format is available.
+ if (device->IsToggleEnabled(Toggle::VulkanUseD32S8)) {
+ return VK_FORMAT_D32_SFLOAT_S8_UINT;
+ } else {
+ return VK_FORMAT_D24_UNORM_S8_UINT;
+ }
case wgpu::TextureFormat::BC1RGBAUnorm:
return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
@@ -428,7 +436,7 @@ namespace dawn_native { namespace vulkan {
createInfo.pNext = nullptr;
createInfo.flags = 0;
createInfo.imageType = VulkanImageType(GetDimension());
- createInfo.format = VulkanImageFormat(GetFormat().format);
+ createInfo.format = VulkanImageFormat(device, GetFormat().format);
createInfo.extent = VulkanExtent3D(GetSize());
createInfo.mipLevels = GetNumMipLevels();
createInfo.arrayLayers = GetArrayLayers();
@@ -452,7 +460,7 @@ namespace dawn_native { namespace vulkan {
createInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
DAWN_TRY(CheckVkSuccess(
- device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
"CreateImage"));
// Create the image memory and associate it with the container
@@ -484,7 +492,7 @@ namespace dawn_native { namespace vulkan {
// Internally managed, but imported from external handle
MaybeError Texture::InitializeFromExternal(const ExternalImageDescriptor* descriptor,
external_memory::Service* externalMemoryService) {
- VkFormat format = VulkanImageFormat(GetFormat().format);
+ VkFormat format = VulkanImageFormat(ToBackend(GetDevice()), GetFormat().format);
VkImageUsageFlags usage = VulkanImageUsage(GetUsage(), GetFormat());
if (!externalMemoryService->SupportsCreateImage(descriptor, format, usage)) {
return DAWN_VALIDATION_ERROR("Creating an image from external memory is not supported");
@@ -669,30 +677,45 @@ namespace dawn_native { namespace vulkan {
uint32_t layerCount,
TextureBase::ClearValue clearValue) {
Device* device = ToBackend(GetDevice());
- VkImageSubresourceRange range = {};
- range.aspectMask = GetVkAspectMask();
- range.baseMipLevel = baseMipLevel;
- range.levelCount = levelCount;
- range.baseArrayLayer = baseArrayLayer;
- range.layerCount = layerCount;
+
uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+ float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst);
if (GetFormat().isRenderable) {
- if (GetFormat().HasDepthOrStencil()) {
- VkClearDepthStencilValue clearDepthStencilValue[1];
- clearDepthStencilValue[0].depth = clearColor;
- clearDepthStencilValue[0].stencil = clearColor;
- device->fn.CmdClearDepthStencilImage(recordingContext->commandBuffer, GetHandle(),
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- clearDepthStencilValue, 1, &range);
- } else {
- float fClearColor = static_cast<float>(clearColor);
- VkClearColorValue clearColorValue = {
- {fClearColor, fClearColor, fClearColor, fClearColor}};
- device->fn.CmdClearColorImage(recordingContext->commandBuffer, GetHandle(),
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
- &clearColorValue, 1, &range);
+ VkImageSubresourceRange range = {};
+ range.aspectMask = GetVkAspectMask();
+ range.levelCount = 1;
+ range.layerCount = 1;
+
+ for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ range.baseMipLevel = level;
+ for (uint32_t layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
+ ++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+
+ range.baseArrayLayer = layer;
+
+ if (GetFormat().HasDepthOrStencil()) {
+ VkClearDepthStencilValue clearDepthStencilValue[1];
+ clearDepthStencilValue[0].depth = fClearColor;
+ clearDepthStencilValue[0].stencil = clearColor;
+ device->fn.CmdClearDepthStencilImage(recordingContext->commandBuffer,
+ GetHandle(),
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ clearDepthStencilValue, 1, &range);
+ } else {
+ VkClearColorValue clearColorValue = {
+ {fClearColor, fClearColor, fClearColor, fClearColor}};
+ device->fn.CmdClearColorImage(recordingContext->commandBuffer, GetHandle(),
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ &clearColorValue, 1, &range);
+ }
+ }
}
} else {
// TODO(natlee@microsoft.com): test compressed textures are cleared
@@ -709,9 +732,7 @@ namespace dawn_native { namespace vulkan {
UploadHandle uploadHandle;
DAWN_TRY_ASSIGN(uploadHandle,
uploader->Allocate(bufferSize, device->GetPendingCommandSerial()));
- std::fill(reinterpret_cast<uint32_t*>(uploadHandle.mappedBuffer),
- reinterpret_cast<uint32_t*>(uploadHandle.mappedBuffer + bufferSize),
- clearColor);
+ memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
// compute the buffer image copy to set the clear region of entire texture
dawn_native::BufferCopy bufferCopy;
@@ -719,11 +740,17 @@ namespace dawn_native { namespace vulkan {
bufferCopy.offset = uploadHandle.startOffset;
bufferCopy.rowPitch = rowPitch;
- Extent3D copySize = {GetSize().width, GetSize().height, 1};
-
for (uint32_t level = baseMipLevel; level < baseMipLevel + levelCount; ++level) {
+ Extent3D copySize = GetMipLevelVirtualSize(level);
+
for (uint32_t layer = baseArrayLayer; layer < baseArrayLayer + layerCount;
++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(level, 1, layer, 1)) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+
dawn_native::TextureCopy textureCopy;
textureCopy.texture = this;
textureCopy.origin = {0, 0, 0};
@@ -790,7 +817,7 @@ namespace dawn_native { namespace vulkan {
createInfo.flags = 0;
createInfo.image = ToBackend(GetTexture())->GetHandle();
createInfo.viewType = VulkanImageViewType(descriptor->dimension);
- createInfo.format = VulkanImageFormat(descriptor->format);
+ createInfo.format = VulkanImageFormat(device, descriptor->format);
createInfo.components = VkComponentMapping{VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
createInfo.subresourceRange.aspectMask = VulkanAspectMask(GetFormat());
@@ -800,7 +827,7 @@ namespace dawn_native { namespace vulkan {
createInfo.subresourceRange.layerCount = descriptor->arrayLayerCount;
return CheckVkSuccess(
- device->fn.CreateImageView(device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ device->fn.CreateImageView(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
"CreateImageView");
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
index 82366ae0ef0..2c898ffcc1d 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
@@ -26,9 +26,8 @@ namespace dawn_native { namespace vulkan {
struct CommandRecordingContext;
class Device;
- struct ExternalImageDescriptor;
- VkFormat VulkanImageFormat(wgpu::TextureFormat format);
+ VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format);
VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format);
VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp
index ad99a27401d..f24d8ca75f1 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp
@@ -42,7 +42,7 @@ namespace dawn_native { namespace vulkan {
// Explicitly export this function because it uses the "native" type for surfaces while the
// header as seen in this file uses the wrapped type.
DAWN_NATIVE_EXPORT DawnSwapChainImplementation
- CreateNativeSwapChainImpl(WGPUDevice device, VkSurfaceKHRNative surfaceNative) {
+ CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surfaceNative) {
Device* backendDevice = reinterpret_cast<Device*>(device);
VkSurfaceKHR surface = VkSurfaceKHR::CreateFromHandle(surfaceNative);
@@ -60,10 +60,6 @@ namespace dawn_native { namespace vulkan {
}
#ifdef DAWN_PLATFORM_LINUX
- ExternalImageDescriptor::ExternalImageDescriptor(ExternalImageDescriptorType type)
- : type(type) {
- }
-
ExternalImageDescriptorFD::ExternalImageDescriptorFD(ExternalImageDescriptorType type)
: ExternalImageDescriptor(type) {
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.cpp
index 543c9b0964c..9d809ba1fa0 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.cpp
@@ -18,8 +18,12 @@
namespace dawn_native { namespace vulkan {
- const char* VkResultAsString(VkResult result) {
- switch (result) {
+ const char* VkResultAsString(::VkResult result) {
+ // Convert to a uint32_t to silence and MSVC warning that the fake errors don't appear in
+ // the original VkResult enum.
+ uint32_t code = static_cast<uint32_t>(result);
+
+ switch (code) {
case VK_SUCCESS:
return "VK_SUCCESS";
case VK_NOT_READY:
@@ -56,18 +60,43 @@ namespace dawn_native { namespace vulkan {
return "VK_ERROR_FORMAT_NOT_SUPPORTED";
case VK_ERROR_FRAGMENTED_POOL:
return "VK_ERROR_FRAGMENTED_POOL";
+ case VK_FAKE_DEVICE_OOM_FOR_TESTING:
+ return "VK_FAKE_DEVICE_OOM_FOR_TESTING";
+ case VK_FAKE_ERROR_FOR_TESTING:
+ return "VK_FAKE_ERROR_FOR_TESTING";
default:
return "<Unknown VkResult>";
}
}
- MaybeError CheckVkSuccess(VkResult result, const char* context) {
+ MaybeError CheckVkSuccessImpl(VkResult result, const char* context) {
+ if (DAWN_LIKELY(result == VK_SUCCESS)) {
+ return {};
+ }
+
+ std::string message = std::string(context) + " failed with " + VkResultAsString(result);
+
+ if (result == VK_ERROR_DEVICE_LOST) {
+ return DAWN_DEVICE_LOST_ERROR(message);
+ } else {
+ return DAWN_INTERNAL_ERROR(message);
+ }
+ }
+
+ MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context) {
if (DAWN_LIKELY(result == VK_SUCCESS)) {
return {};
}
std::string message = std::string(context) + " failed with " + VkResultAsString(result);
- return DAWN_DEVICE_LOST_ERROR(message);
+
+ if (result == VK_ERROR_OUT_OF_DEVICE_MEMORY || result == VK_FAKE_DEVICE_OOM_FOR_TESTING) {
+ return DAWN_OUT_OF_MEMORY_ERROR(message);
+ } else if (result == VK_ERROR_DEVICE_LOST) {
+ return DAWN_DEVICE_LOST_ERROR(message);
+ } else {
+ return DAWN_INTERNAL_ERROR(message);
+ }
}
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.h b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.h
index 3dedece1ad4..7748f56cd39 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanError.h
@@ -15,19 +15,35 @@
#ifndef DAWNNATIVE_VULKAN_VULKANERROR_H_
#define DAWNNATIVE_VULKAN_VULKANERROR_H_
-#include "common/vulkan_platform.h"
-#include "dawn_native/Error.h"
+#include "dawn_native/ErrorInjector.h"
+#include "dawn_native/vulkan/VulkanFunctions.h"
+
+constexpr VkResult VK_FAKE_ERROR_FOR_TESTING = VK_RESULT_MAX_ENUM;
+constexpr VkResult VK_FAKE_DEVICE_OOM_FOR_TESTING = static_cast<VkResult>(VK_RESULT_MAX_ENUM - 1);
namespace dawn_native { namespace vulkan {
// Returns a string version of the result.
- const char* VkResultAsString(VkResult result);
+ const char* VkResultAsString(::VkResult result);
+
+ MaybeError CheckVkSuccessImpl(VkResult result, const char* context);
+ MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context);
+
+// Returns a success only if result if VK_SUCCESS, an error with the context and stringified
+// result value instead. Can be used like this:
+//
+// DAWN_TRY(CheckVkSuccess(vkDoSomething, "doing something"));
+#define CheckVkSuccess(resultIn, contextIn) \
+ ::dawn_native::vulkan::CheckVkSuccessImpl( \
+ ::dawn_native::vulkan::VkResult::WrapUnsafe( \
+ INJECT_ERROR_OR_RUN(resultIn, VK_FAKE_ERROR_FOR_TESTING)), \
+ contextIn)
- // Returns a success only if result if VK_SUCCESS, an error with the context and stringified
- // result value instead. Can be used like this:
- //
- // DAWN_TRY(CheckVkSuccess(vkDoSomething, "doing something"));
- MaybeError CheckVkSuccess(VkResult result, const char* context);
+#define CheckVkOOMThenSuccess(resultIn, contextIn) \
+ ::dawn_native::vulkan::CheckVkOOMThenSuccessImpl( \
+ ::dawn_native::vulkan::VkResult::WrapUnsafe(INJECT_ERROR_OR_RUN( \
+ resultIn, VK_FAKE_DEVICE_OOM_FOR_TESTING, VK_FAKE_ERROR_FOR_TESTING)), \
+ contextIn)
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp
index 5cec83a2b99..159b7407f13 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp
@@ -22,12 +22,12 @@ namespace dawn_native { namespace vulkan {
#define GET_GLOBAL_PROC(name) \
name = reinterpret_cast<decltype(name)>(GetInstanceProcAddr(nullptr, "vk" #name)); \
if (name == nullptr) { \
- return DAWN_DEVICE_LOST_ERROR(std::string("Couldn't get proc vk") + #name); \
+ return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #name); \
}
MaybeError VulkanFunctions::LoadGlobalProcs(const DynamicLib& vulkanLib) {
if (!vulkanLib.GetProc(&GetInstanceProcAddr, "vkGetInstanceProcAddr")) {
- return DAWN_DEVICE_LOST_ERROR("Couldn't get vkGetInstanceProcAddr");
+ return DAWN_INTERNAL_ERROR("Couldn't get vkGetInstanceProcAddr");
}
GET_GLOBAL_PROC(CreateInstance);
@@ -41,12 +41,15 @@ namespace dawn_native { namespace vulkan {
return {};
}
-#define GET_INSTANCE_PROC(name) \
- name = reinterpret_cast<decltype(name)>(GetInstanceProcAddr(instance, "vk" #name)); \
- if (name == nullptr) { \
- return DAWN_DEVICE_LOST_ERROR(std::string("Couldn't get proc vk") + #name); \
+#define GET_INSTANCE_PROC_BASE(name, procName) \
+ name = reinterpret_cast<decltype(name)>(GetInstanceProcAddr(instance, "vk" #procName)); \
+ if (name == nullptr) { \
+ return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #procName); \
}
+#define GET_INSTANCE_PROC(name) GET_INSTANCE_PROC_BASE(name, name)
+#define GET_INSTANCE_PROC_VENDOR(name, vendor) GET_INSTANCE_PROC_BASE(name, name##vendor)
+
MaybeError VulkanFunctions::LoadInstanceProcs(VkInstance instance,
const VulkanGlobalInfo& globalInfo) {
// Load this proc first so that we can destroy the instance even if some other
@@ -73,26 +76,36 @@ namespace dawn_native { namespace vulkan {
GET_INSTANCE_PROC(DestroyDebugReportCallbackEXT);
}
- // Vulkan 1.1 is not required to report promoted extensions from 1.0
- if (globalInfo.externalMemoryCapabilities ||
- globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
- GET_INSTANCE_PROC(GetPhysicalDeviceExternalBufferPropertiesKHR);
+ // Vulkan 1.1 is not required to report promoted extensions from 1.0 and is not required to
+ // support the vendor entrypoint in GetProcAddress.
+ if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ GET_INSTANCE_PROC(GetPhysicalDeviceExternalBufferProperties);
+ } else if (globalInfo.externalMemoryCapabilities) {
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalBufferProperties, KHR);
}
- if (globalInfo.externalSemaphoreCapabilities ||
- globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
- GET_INSTANCE_PROC(GetPhysicalDeviceExternalSemaphorePropertiesKHR);
+ if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ GET_INSTANCE_PROC(GetPhysicalDeviceExternalSemaphoreProperties);
+ } else if (globalInfo.externalSemaphoreCapabilities) {
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalSemaphoreProperties, KHR);
}
- if (globalInfo.getPhysicalDeviceProperties2 ||
- globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
- GET_INSTANCE_PROC(GetPhysicalDeviceFeatures2KHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceProperties2KHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties2KHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties2KHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties2KHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties2KHR);
- GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties2KHR);
+ if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ GET_INSTANCE_PROC(GetPhysicalDeviceFeatures2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties2);
+ GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties2);
+ } else if (globalInfo.getPhysicalDeviceProperties2) {
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFeatures2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFormatProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceImageFormatProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceQueueFamilyProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceMemoryProperties2, KHR);
+ GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceSparseImageFormatProperties2, KHR);
}
if (globalInfo.surface) {
@@ -103,19 +116,38 @@ namespace dawn_native { namespace vulkan {
GET_INSTANCE_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
}
-#ifdef VK_USE_PLATFORM_FUCHSIA
+#if defined(VK_USE_PLATFORM_FUCHSIA)
if (globalInfo.fuchsiaImagePipeSurface) {
GET_INSTANCE_PROC(CreateImagePipeSurfaceFUCHSIA);
}
-#endif
+#endif // defined(VK_USE_PLATFORM_FUCHSIA)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ if (globalInfo.metalSurface) {
+ GET_INSTANCE_PROC(CreateMetalSurfaceEXT);
+ }
+#endif // defined(DAWN_ENABLE_BACKEND_METAL)
+#if defined(DAWN_PLATFORM_WINDOWS)
+ if (globalInfo.win32Surface) {
+ GET_INSTANCE_PROC(CreateWin32SurfaceKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceWin32PresentationSupportKHR);
+ }
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+
+#if defined(DAWN_USE_X11)
+ if (globalInfo.xlibSurface) {
+ GET_INSTANCE_PROC(CreateXlibSurfaceKHR);
+ GET_INSTANCE_PROC(GetPhysicalDeviceXlibPresentationSupportKHR);
+ }
+#endif // defined(DAWN_USE_X11)
return {};
}
#define GET_DEVICE_PROC(name) \
name = reinterpret_cast<decltype(name)>(GetDeviceProcAddr(device, "vk" #name)); \
if (name == nullptr) { \
- return DAWN_DEVICE_LOST_ERROR(std::string("Couldn't get proc vk") + #name); \
+ return DAWN_INTERNAL_ERROR(std::string("Couldn't get proc vk") + #name); \
}
MaybeError VulkanFunctions::LoadDeviceProcs(VkDevice device,
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h
index eb5a4725d72..1f0a4537b43 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h
@@ -84,32 +84,52 @@ namespace dawn_native { namespace vulkan {
PFN_vkGetPhysicalDeviceSurfacePresentModesKHR GetPhysicalDeviceSurfacePresentModesKHR =
nullptr;
- // Core Vulkan 1.1 promoted extensions
+ // Core Vulkan 1.1 promoted extensions, set if either the core version or the extension is
+ // present.
// VK_KHR_external_memory_capabilities
- PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR
- GetPhysicalDeviceExternalBufferPropertiesKHR = nullptr;
+ PFN_vkGetPhysicalDeviceExternalBufferProperties GetPhysicalDeviceExternalBufferProperties =
+ nullptr;
// VK_KHR_external_semaphore_capabilities
- PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR
- GetPhysicalDeviceExternalSemaphorePropertiesKHR = nullptr;
+ PFN_vkGetPhysicalDeviceExternalSemaphoreProperties
+ GetPhysicalDeviceExternalSemaphoreProperties = nullptr;
// VK_KHR_get_physical_device_properties2
- PFN_vkGetPhysicalDeviceFeatures2KHR GetPhysicalDeviceFeatures2KHR = nullptr;
- PFN_vkGetPhysicalDeviceProperties2KHR GetPhysicalDeviceProperties2KHR = nullptr;
- PFN_vkGetPhysicalDeviceFormatProperties2KHR GetPhysicalDeviceFormatProperties2KHR = nullptr;
- PFN_vkGetPhysicalDeviceImageFormatProperties2KHR
- GetPhysicalDeviceImageFormatProperties2KHR = nullptr;
- PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR
- GetPhysicalDeviceQueueFamilyProperties2KHR = nullptr;
- PFN_vkGetPhysicalDeviceMemoryProperties2KHR GetPhysicalDeviceMemoryProperties2KHR = nullptr;
- PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR
- GetPhysicalDeviceSparseImageFormatProperties2KHR = nullptr;
-
-#ifdef VK_USE_PLATFORM_FUCHSIA
+ PFN_vkGetPhysicalDeviceFeatures2 GetPhysicalDeviceFeatures2 = nullptr;
+ PFN_vkGetPhysicalDeviceProperties2 GetPhysicalDeviceProperties2 = nullptr;
+ PFN_vkGetPhysicalDeviceFormatProperties2 GetPhysicalDeviceFormatProperties2 = nullptr;
+ PFN_vkGetPhysicalDeviceImageFormatProperties2 GetPhysicalDeviceImageFormatProperties2 =
+ nullptr;
+ PFN_vkGetPhysicalDeviceQueueFamilyProperties2 GetPhysicalDeviceQueueFamilyProperties2 =
+ nullptr;
+ PFN_vkGetPhysicalDeviceMemoryProperties2 GetPhysicalDeviceMemoryProperties2 = nullptr;
+ PFN_vkGetPhysicalDeviceSparseImageFormatProperties2
+ GetPhysicalDeviceSparseImageFormatProperties2 = nullptr;
+
+#if defined(VK_USE_PLATFORM_FUCHSIA)
// FUCHSIA_image_pipe_surface
PFN_vkCreateImagePipeSurfaceFUCHSIA CreateImagePipeSurfaceFUCHSIA = nullptr;
-#endif
+#endif // defined(VK_USE_PLATFORM_FUCHSIA)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+ // EXT_metal_surface
+ PFN_vkCreateMetalSurfaceEXT CreateMetalSurfaceEXT = nullptr;
+#endif // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+ // KHR_win32_surface
+ PFN_vkCreateWin32SurfaceKHR CreateWin32SurfaceKHR = nullptr;
+ PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR
+ GetPhysicalDeviceWin32PresentationSupportKHR = nullptr;
+#endif // defined(DAWN_PLATFORM_WINDOWS)
+
+#if defined(DAWN_USE_X11)
+ // KHR_xlib_surface
+ PFN_vkCreateXlibSurfaceKHR CreateXlibSurfaceKHR = nullptr;
+ PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR
+ GetPhysicalDeviceXlibPresentationSupportKHR = nullptr;
+#endif // defined(DAWN_USE_X11)
// ---------- Device procs
@@ -266,6 +286,28 @@ namespace dawn_native { namespace vulkan {
#endif
};
+ // Create a wrapper around VkResult in the dawn_native::vulkan namespace. This shadows the
+ // default VkResult (::VkResult). This ensures that assigning or creating a VkResult from a raw
+ // ::VkResult uses WrapUnsafe. This makes it clear that users of VkResult must be intentional
+ // about handling error cases.
+ class VkResult {
+ public:
+ constexpr static VkResult WrapUnsafe(::VkResult value) {
+ return VkResult(value);
+ }
+
+ constexpr operator ::VkResult() const {
+ return mValue;
+ }
+
+ private:
+ // Private. Use VkResult::WrapUnsafe instead.
+ constexpr VkResult(::VkResult value) : mValue(value) {
+ }
+
+ ::VkResult mValue;
+ };
+
}} // namespace dawn_native::vulkan
#endif // DAWNNATIVE_VULKAN_VULKANFUNCTIONS_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp
index ecaa20e7063..3d2f66c8e01 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp
@@ -14,38 +14,40 @@
#include "dawn_native/vulkan/VulkanInfo.h"
+#include "common/Log.h"
#include "dawn_native/vulkan/AdapterVk.h"
#include "dawn_native/vulkan/BackendVk.h"
+#include "dawn_native/vulkan/VulkanError.h"
#include <cstring>
-namespace {
- bool IsLayerName(const VkLayerProperties& layer, const char* name) {
- return strncmp(layer.layerName, name, VK_MAX_EXTENSION_NAME_SIZE) == 0;
- }
+namespace dawn_native { namespace vulkan {
- bool IsExtensionName(const VkExtensionProperties& extension, const char* name) {
- return strncmp(extension.extensionName, name, VK_MAX_EXTENSION_NAME_SIZE) == 0;
- }
+ namespace {
+ bool IsLayerName(const VkLayerProperties& layer, const char* name) {
+ return strncmp(layer.layerName, name, VK_MAX_EXTENSION_NAME_SIZE) == 0;
+ }
- bool EnumerateInstanceExtensions(const char* layerName,
- const dawn_native::vulkan::VulkanFunctions& vkFunctions,
- std::vector<VkExtensionProperties>* extensions) {
- uint32_t count = 0;
- VkResult result =
- vkFunctions.EnumerateInstanceExtensionProperties(layerName, &count, nullptr);
- if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return false;
+ bool IsExtensionName(const VkExtensionProperties& extension, const char* name) {
+ return strncmp(extension.extensionName, name, VK_MAX_EXTENSION_NAME_SIZE) == 0;
}
- extensions->resize(count);
- result =
- vkFunctions.EnumerateInstanceExtensionProperties(layerName, &count, extensions->data());
- return (result == VK_SUCCESS);
- }
-} // namespace
+ bool EnumerateInstanceExtensions(const char* layerName,
+ const dawn_native::vulkan::VulkanFunctions& vkFunctions,
+ std::vector<VkExtensionProperties>* extensions) {
+ uint32_t count = 0;
+ VkResult result = VkResult::WrapUnsafe(
+ vkFunctions.EnumerateInstanceExtensionProperties(layerName, &count, nullptr));
+ if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+ return false;
+ }
+ extensions->resize(count);
+ result = VkResult::WrapUnsafe(vkFunctions.EnumerateInstanceExtensionProperties(
+ layerName, &count, extensions->data()));
+ return (result == VK_SUCCESS);
+ }
-namespace dawn_native { namespace vulkan {
+ } // namespace
const char kLayerNameLunargStandardValidation[] = "VK_LAYER_LUNARG_standard_validation";
const char kLayerNameLunargVKTrace[] = "VK_LAYER_LUNARG_vktrace";
@@ -54,7 +56,7 @@ namespace dawn_native { namespace vulkan {
const char kExtensionNameExtDebugMarker[] = "VK_EXT_debug_marker";
const char kExtensionNameExtDebugReport[] = "VK_EXT_debug_report";
- const char kExtensionNameMvkMacosSurface[] = "VK_MVK_macos_surface";
+ const char kExtensionNameExtMetalSurface[] = "VK_EXT_metal_surface";
const char kExtensionNameKhrExternalMemory[] = "VK_KHR_external_memory";
const char kExtensionNameKhrExternalMemoryCapabilities[] =
"VK_KHR_external_memory_capabilities";
@@ -85,19 +87,19 @@ namespace dawn_native { namespace vulkan {
// Gather the info about the instance layers
{
uint32_t count = 0;
- VkResult result = vkFunctions.EnumerateInstanceLayerProperties(&count, nullptr);
+ VkResult result =
+ VkResult::WrapUnsafe(vkFunctions.EnumerateInstanceLayerProperties(&count, nullptr));
// From the Vulkan spec result should be success if there are 0 layers,
// incomplete otherwise. This means that both values represent a success.
// This is the same for all Enumarte functions
if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_DEVICE_LOST_ERROR("vkEnumerateInstanceLayerProperties");
+ return DAWN_INTERNAL_ERROR("vkEnumerateInstanceLayerProperties");
}
info.layers.resize(count);
- result = vkFunctions.EnumerateInstanceLayerProperties(&count, info.layers.data());
- if (result != VK_SUCCESS) {
- return DAWN_DEVICE_LOST_ERROR("vkEnumerateInstanceLayerProperties");
- }
+ DAWN_TRY(CheckVkSuccess(
+ vkFunctions.EnumerateInstanceLayerProperties(&count, info.layers.data()),
+ "vkEnumerateInstanceLayerProperties"));
for (const auto& layer : info.layers) {
if (IsLayerName(layer, kLayerNameLunargStandardValidation)) {
@@ -121,15 +123,15 @@ namespace dawn_native { namespace vulkan {
// Gather the info about the instance extensions
{
if (!EnumerateInstanceExtensions(nullptr, vkFunctions, &info.extensions)) {
- return DAWN_DEVICE_LOST_ERROR("vkEnumerateInstanceExtensionProperties");
+ return DAWN_INTERNAL_ERROR("vkEnumerateInstanceExtensionProperties");
}
for (const auto& extension : info.extensions) {
if (IsExtensionName(extension, kExtensionNameExtDebugReport)) {
info.debugReport = true;
}
- if (IsExtensionName(extension, kExtensionNameMvkMacosSurface)) {
- info.macosSurface = true;
+ if (IsExtensionName(extension, kExtensionNameExtMetalSurface)) {
+ info.metalSurface = true;
}
if (IsExtensionName(extension, kExtensionNameKhrExternalMemoryCapabilities)) {
info.externalMemoryCapabilities = true;
@@ -167,7 +169,7 @@ namespace dawn_native { namespace vulkan {
std::vector<VkExtensionProperties> layer_extensions;
if (!EnumerateInstanceExtensions(kLayerNameFuchsiaImagePipeSwapchain, vkFunctions,
&layer_extensions)) {
- return DAWN_DEVICE_LOST_ERROR("vkEnumerateInstanceExtensionProperties");
+ return DAWN_INTERNAL_ERROR("vkEnumerateInstanceExtensionProperties");
}
for (const auto& extension : layer_extensions) {
@@ -202,16 +204,16 @@ namespace dawn_native { namespace vulkan {
const VulkanFunctions& vkFunctions = backend.GetFunctions();
uint32_t count = 0;
- VkResult result = vkFunctions.EnumeratePhysicalDevices(instance, &count, nullptr);
+ VkResult result =
+ VkResult::WrapUnsafe(vkFunctions.EnumeratePhysicalDevices(instance, &count, nullptr));
if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_DEVICE_LOST_ERROR("vkEnumeratePhysicalDevices");
+ return DAWN_INTERNAL_ERROR("vkEnumeratePhysicalDevices");
}
std::vector<VkPhysicalDevice> physicalDevices(count);
- result = vkFunctions.EnumeratePhysicalDevices(instance, &count, physicalDevices.data());
- if (result != VK_SUCCESS) {
- return DAWN_DEVICE_LOST_ERROR("vkEnumeratePhysicalDevices");
- }
+ DAWN_TRY(CheckVkSuccess(
+ vkFunctions.EnumeratePhysicalDevices(instance, &count, physicalDevices.data()),
+ "vkEnumeratePhysicalDevices"));
return physicalDevices;
}
@@ -249,35 +251,31 @@ namespace dawn_native { namespace vulkan {
// Gather the info about the device layers
{
uint32_t count = 0;
- VkResult result =
- vkFunctions.EnumerateDeviceLayerProperties(physicalDevice, &count, nullptr);
+ VkResult result = VkResult::WrapUnsafe(
+ vkFunctions.EnumerateDeviceLayerProperties(physicalDevice, &count, nullptr));
if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_DEVICE_LOST_ERROR("vkEnumerateDeviceLayerProperties");
+ return DAWN_INTERNAL_ERROR("vkEnumerateDeviceLayerProperties");
}
info.layers.resize(count);
- result = vkFunctions.EnumerateDeviceLayerProperties(physicalDevice, &count,
- info.layers.data());
- if (result != VK_SUCCESS) {
- return DAWN_DEVICE_LOST_ERROR("vkEnumerateDeviceLayerProperties");
- }
+ DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateDeviceLayerProperties(
+ physicalDevice, &count, info.layers.data()),
+ "vkEnumerateDeviceLayerProperties"));
}
// Gather the info about the device extensions
{
uint32_t count = 0;
- VkResult result = vkFunctions.EnumerateDeviceExtensionProperties(
- physicalDevice, nullptr, &count, nullptr);
+ VkResult result = VkResult::WrapUnsafe(vkFunctions.EnumerateDeviceExtensionProperties(
+ physicalDevice, nullptr, &count, nullptr));
if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_DEVICE_LOST_ERROR("vkEnumerateDeviceExtensionProperties");
+ return DAWN_INTERNAL_ERROR("vkEnumerateDeviceExtensionProperties");
}
info.extensions.resize(count);
- result = vkFunctions.EnumerateDeviceExtensionProperties(physicalDevice, nullptr, &count,
- info.extensions.data());
- if (result != VK_SUCCESS) {
- return DAWN_DEVICE_LOST_ERROR("vkEnumerateDeviceExtensionProperties");
- }
+ DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateDeviceExtensionProperties(
+ physicalDevice, nullptr, &count, info.extensions.data()),
+ "vkEnumerateDeviceExtensionProperties"));
for (const auto& extension : info.extensions) {
if (IsExtensionName(extension, kExtensionNameExtDebugMarker)) {
@@ -316,79 +314,75 @@ namespace dawn_native { namespace vulkan {
}
}
+ // Mark the extensions promoted to Vulkan 1.1 as available.
+ if (info.properties.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ info.maintenance1 = true;
+ }
+
// TODO(cwallez@chromium.org): gather info about formats
return info;
}
- MaybeError GatherSurfaceInfo(const Adapter& adapter,
- VkSurfaceKHR surface,
- VulkanSurfaceInfo* info) {
+ ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter,
+ VkSurfaceKHR surface) {
+ VulkanSurfaceInfo info = {};
+
VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
const VulkanFunctions& vkFunctions = adapter.GetBackend()->GetFunctions();
// Get the surface capabilities
- {
- VkResult result = vkFunctions.GetPhysicalDeviceSurfaceCapabilitiesKHR(
- physicalDevice, surface, &info->capabilities);
- if (result != VK_SUCCESS) {
- return DAWN_DEVICE_LOST_ERROR("vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
- }
- }
+ DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceCapabilitiesKHR(
+ physicalDevice, surface, &info.capabilities),
+ "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"));
// Query which queue families support presenting this surface
{
size_t nQueueFamilies = adapter.GetDeviceInfo().queueFamilies.size();
- info->supportedQueueFamilies.resize(nQueueFamilies, false);
+ info.supportedQueueFamilies.resize(nQueueFamilies, false);
for (uint32_t i = 0; i < nQueueFamilies; ++i) {
VkBool32 supported = VK_FALSE;
- VkResult result = vkFunctions.GetPhysicalDeviceSurfaceSupportKHR(
- physicalDevice, i, surface, &supported);
-
- if (result != VK_SUCCESS) {
- return DAWN_DEVICE_LOST_ERROR("vkGetPhysicalDeviceSurfaceSupportKHR");
- }
+ DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceSupportKHR(
+ physicalDevice, i, surface, &supported),
+ "vkGetPhysicalDeviceSurfaceSupportKHR"));
- info->supportedQueueFamilies[i] = (supported == VK_TRUE);
+ info.supportedQueueFamilies[i] = (supported == VK_TRUE);
}
}
// Gather supported formats
{
uint32_t count = 0;
- VkResult result = vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
- physicalDevice, surface, &count, nullptr);
+ VkResult result = VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
+ physicalDevice, surface, &count, nullptr));
if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_DEVICE_LOST_ERROR("vkGetPhysicalDeviceSurfaceFormatsKHR");
+ return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfaceFormatsKHR");
}
- info->formats.resize(count);
- result = vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, &count,
- info->formats.data());
- if (result != VK_SUCCESS) {
- return DAWN_DEVICE_LOST_ERROR("vkGetPhysicalDeviceSurfaceFormatsKHR");
- }
+ info.formats.resize(count);
+ DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
+ physicalDevice, surface, &count, info.formats.data()),
+ "vkGetPhysicalDeviceSurfaceFormatsKHR"));
}
// Gather supported presents modes
{
uint32_t count = 0;
- VkResult result = vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
- physicalDevice, surface, &count, nullptr);
+ VkResult result =
+ VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
+ physicalDevice, surface, &count, nullptr));
if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
- return DAWN_DEVICE_LOST_ERROR("vkGetPhysicalDeviceSurfacePresentModesKHR");
+ return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfacePresentModesKHR");
}
- info->presentModes.resize(count);
- result = vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
- physicalDevice, surface, &count, info->presentModes.data());
- if (result != VK_SUCCESS) {
- return DAWN_DEVICE_LOST_ERROR("vkGetPhysicalDeviceSurfacePresentModesKHR");
- }
+ info.presentModes.resize(count);
+ DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
+ physicalDevice, surface, &count, info.presentModes.data()),
+ "vkGetPhysicalDeviceSurfacePresentModesKHR"));
}
- return {};
+ return info;
}
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h
index ac64af0b4fe..b62b32a9fee 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h
@@ -32,7 +32,7 @@ namespace dawn_native { namespace vulkan {
extern const char kExtensionNameExtDebugMarker[];
extern const char kExtensionNameExtDebugReport[];
- extern const char kExtensionNameMvkMacosSurface[];
+ extern const char kExtensionNameExtMetalSurface[];
extern const char kExtensionNameKhrExternalMemory[];
extern const char kExtensionNameKhrExternalMemoryCapabilities[];
extern const char kExtensionNameKhrExternalMemoryFD[];
@@ -66,7 +66,7 @@ namespace dawn_native { namespace vulkan {
bool externalMemoryCapabilities = false;
bool externalSemaphoreCapabilities = false;
bool getPhysicalDeviceProperties2 = false;
- bool macosSurface = false;
+ bool metalSurface = false;
bool surface = false;
bool waylandSurface = false;
bool win32Surface = false;
@@ -86,7 +86,7 @@ namespace dawn_native { namespace vulkan {
struct VulkanDeviceKnobs {
VkPhysicalDeviceFeatures features;
- // Extensions
+ // Extensions, promoted extensions are set to true if their core version is supported.
bool debugMarker = false;
bool externalMemory = false;
bool externalMemoryFD = false;
@@ -122,9 +122,8 @@ namespace dawn_native { namespace vulkan {
ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const Backend& backend);
ResultOrError<std::vector<VkPhysicalDevice>> GetPhysicalDevices(const Backend& backend);
ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter);
- MaybeError GatherSurfaceInfo(const Adapter& adapter,
- VkSurfaceKHR surface,
- VulkanSurfaceInfo* info);
+ ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter,
+ VkSurfaceKHR surface);
}} // namespace dawn_native::vulkan
#endif // DAWNNATIVE_VULKAN_VULKANINFO_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
index e9944f8a4af..7c5c82da02f 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
@@ -41,13 +41,13 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
formatProps.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
formatProps.pNext = &formatModifierPropsList;
- fn.GetPhysicalDeviceFormatProperties2KHR(physicalDevice, format, &formatProps);
+ fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
uint32_t modifierCount = formatModifierPropsList.drmFormatModifierCount;
std::vector<VkDrmFormatModifierPropertiesEXT> formatModifierProps(modifierCount);
formatModifierPropsList.pDrmFormatModifierProperties = formatModifierProps.data();
- fn.GetPhysicalDeviceFormatProperties2KHR(physicalDevice, format, &formatProps);
+ fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
for (const auto& props : formatModifierProps) {
if (props.drmFormatModifier == modifier) {
uint32_t count = props.drmFormatModifierPlaneCount;
@@ -141,8 +141,8 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
imageFormatProps.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
imageFormatProps.pNext = &externalImageFormatProps;
- VkResult result = mDevice->fn.GetPhysicalDeviceImageFormatProperties2KHR(
- physicalDevice, &imageFormatInfo, &imageFormatProps);
+ VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
+ physicalDevice, &imageFormatInfo, &imageFormatProps));
if (result != VK_SUCCESS) {
return false;
}
@@ -195,7 +195,7 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
memoryDedicatedAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
memoryDedicatedAllocateInfo.pNext = nullptr;
memoryDedicatedAllocateInfo.image = image;
- memoryDedicatedAllocateInfo.buffer = VK_NULL_HANDLE;
+ memoryDedicatedAllocateInfo.buffer = VkBuffer{};
VkImportMemoryFdInfoKHR importMemoryFdInfo;
importMemoryFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
@@ -212,7 +212,7 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
DAWN_TRY(
CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &memoryAllocateInfo,
- nullptr, &allocatedMemory),
+ nullptr, &*allocatedMemory),
"vkAllocateMemory"));
return allocatedMemory;
}
@@ -263,7 +263,7 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
// Create a new VkImage with tiling equal to the DRM format modifier.
VkImage image;
- DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateImage(device, &createInfo, nullptr, &image),
+ DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateImage(device, &createInfo, nullptr, &*image),
"CreateImage"));
return image;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
index 2a31b311874..872432410a8 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
@@ -66,8 +66,8 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
formatProperties.pNext = &externalFormatProperties;
- VkResult result = mDevice->fn.GetPhysicalDeviceImageFormatProperties2KHR(
- ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties);
+ VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
+ ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties));
// If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
if (result != VK_SUCCESS) {
@@ -128,7 +128,7 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
- nullptr, &allocatedMemory),
+ nullptr, &*allocatedMemory),
"vkAllocateMemory"));
return allocatedMemory;
}
@@ -144,7 +144,7 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
VkImage image;
DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &image),
+ mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
"CreateImage"));
return image;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
index 8c70c677c51..10b99555e5c 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
@@ -66,7 +66,7 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
formatProperties.pNext = &externalFormatProperties;
- VkResult result = mDevice->fn.GetPhysicalDeviceImageFormatProperties2KHR(
+ VkResult result = mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties);
// If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
@@ -130,7 +130,7 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
- nullptr, &allocatedMemory),
+ nullptr, &*allocatedMemory),
"vkAllocateMemory"));
return allocatedMemory;
}
@@ -146,7 +146,7 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
VkImage image;
DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &image),
+ mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
"CreateImage"));
return image;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceOpaqueFD.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceOpaqueFD.cpp
index ea7bf47d0f0..e79288a996a 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceOpaqueFD.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceOpaqueFD.cpp
@@ -43,7 +43,7 @@ namespace dawn_native { namespace vulkan { namespace external_semaphore {
semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
semaphoreProperties.pNext = nullptr;
- mDevice->fn.GetPhysicalDeviceExternalSemaphorePropertiesKHR(
+ mDevice->fn.GetPhysicalDeviceExternalSemaphoreProperties(
ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &semaphoreInfo,
&semaphoreProperties);
@@ -72,7 +72,7 @@ namespace dawn_native { namespace vulkan { namespace external_semaphore {
info.flags = 0;
DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &semaphore),
+ mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
"vkCreateSemaphore"));
VkImportSemaphoreFdInfoKHR importSemaphoreFdInfo;
@@ -109,7 +109,7 @@ namespace dawn_native { namespace vulkan { namespace external_semaphore {
VkSemaphore signalSemaphore;
DAWN_TRY(
CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
- nullptr, &signalSemaphore),
+ nullptr, &*signalSemaphore),
"vkCreateSemaphore"));
return signalSemaphore;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
index 81fb9fd5ecd..fd10076e9eb 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
@@ -43,7 +43,7 @@ namespace dawn_native { namespace vulkan { namespace external_semaphore {
semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
semaphoreProperties.pNext = nullptr;
- mDevice->fn.GetPhysicalDeviceExternalSemaphorePropertiesKHR(
+ mDevice->fn.GetPhysicalDeviceExternalSemaphoreProperties(
ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &semaphoreInfo,
&semaphoreProperties);
@@ -72,7 +72,7 @@ namespace dawn_native { namespace vulkan { namespace external_semaphore {
info.flags = 0;
DAWN_TRY(CheckVkSuccess(
- mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &semaphore),
+ mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
"vkCreateSemaphore"));
VkImportSemaphoreZirconHandleInfoFUCHSIA importSempahoreHandleInfo;
@@ -112,7 +112,7 @@ namespace dawn_native { namespace vulkan { namespace external_semaphore {
VkSemaphore signalSemaphore;
DAWN_TRY(
CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
- nullptr, &signalSemaphore),
+ nullptr, &*signalSemaphore),
"vkCreateSemaphore"));
return signalSemaphore;
}
diff --git a/chromium/third_party/dawn/src/dawn_platform/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_platform/CMakeLists.txt
new file mode 100644
index 00000000000..d94b5525ed3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_platform/CMakeLists.txt
@@ -0,0 +1,22 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_library(dawn_platform STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawn_platform PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn_platform/DawnPlatform.h"
+ "tracing/EventTracer.cpp"
+ "tracing/EventTracer.h"
+ "tracing/TraceEvent.h"
+)
+target_link_libraries(dawn_platform PRIVATE dawn_internal_config dawn_common)
diff --git a/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt
new file mode 100644
index 00000000000..8ec7bff9ab9
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt
@@ -0,0 +1,57 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DawnJSONGenerator(
+ TARGET "dawn_wire"
+ PRINT_NAME "Dawn wire"
+ RESULT_VARIABLE "DAWN_WIRE_GEN_SOURCES"
+)
+
+add_library(dawn_wire STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawn_wire PRIVATE
+ "${DAWN_INCLUDE_DIR}/dawn_wire/Wire.h"
+ "${DAWN_INCLUDE_DIR}/dawn_wire/WireClient.h"
+ "${DAWN_INCLUDE_DIR}/dawn_wire/WireServer.h"
+ "${DAWN_INCLUDE_DIR}/dawn_wire/dawn_wire_export.h"
+ ${DAWN_WIRE_GEN_SOURCES}
+ "WireClient.cpp"
+ "WireDeserializeAllocator.cpp"
+ "WireDeserializeAllocator.h"
+ "WireServer.cpp"
+ "client/ApiObjects.h"
+ "client/ApiProcs.cpp"
+ "client/Buffer.cpp"
+ "client/Buffer.h"
+ "client/Client.cpp"
+ "client/Client.h"
+ "client/ClientDoers.cpp"
+ "client/ClientInlineMemoryTransferService.cpp"
+ "client/Device.cpp"
+ "client/Device.h"
+ "client/Fence.cpp"
+ "client/Fence.h"
+ "client/ObjectAllocator.h"
+ "server/ObjectStorage.h"
+ "server/Server.cpp"
+ "server/Server.h"
+ "server/ServerBuffer.cpp"
+ "server/ServerDevice.cpp"
+ "server/ServerFence.cpp"
+ "server/ServerInlineMemoryTransferService.cpp"
+ "server/ServerQueue.cpp"
+)
+target_link_libraries(dawn_wire
+ PUBLIC dawn_headers
+ PRIVATE dawn_common dawn_internal_config
+)
diff --git a/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp b/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp
index ac0a25ae27d..972d6decd8c 100644
--- a/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp
@@ -25,12 +25,13 @@ namespace dawn_wire {
mImpl.reset();
}
- WGPUDevice WireClient::GetDevice() const {
- return mImpl->GetDevice();
+ // static
+ DawnProcTable WireClient::GetProcs() {
+ return client::GetProcs();
}
- DawnProcTable WireClient::GetProcs() const {
- return client::GetProcs();
+ WGPUDevice WireClient::GetDevice() const {
+ return mImpl->GetDevice();
}
const volatile char* WireClient::HandleCommands(const volatile char* commands, size_t size) {
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp
index 14f25e4b19e..128bde7f589 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp
@@ -431,5 +431,11 @@ namespace dawn_wire { namespace client {
Device* device = reinterpret_cast<Device*>(cSelf);
device->SetUncapturedErrorCallback(callback, userdata);
}
+ void ClientDeviceSetDeviceLostCallback(WGPUDevice cSelf,
+ WGPUDeviceLostCallback callback,
+ void* userdata) {
+ Device* device = reinterpret_cast<Device*>(cSelf);
+ device->SetDeviceLostCallback(callback, userdata);
+ }
}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
index 1be0f1df018..dd904068692 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
@@ -35,6 +35,11 @@ namespace dawn_wire { namespace client {
return true;
}
+ bool Client::DoDeviceLostCallback(char const* message) {
+ mDevice->HandleDeviceLost(message);
+ return true;
+ }
+
bool Client::DoDevicePopErrorScopeCallback(uint64_t requestSerial,
WGPUErrorType errorType,
const char* message) {
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
index 8577a4f644e..175617fa968 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
@@ -42,11 +42,22 @@ namespace dawn_wire { namespace client {
}
}
+ void Device::HandleDeviceLost(const char* message) {
+ if (mDeviceLostCallback) {
+ mDeviceLostCallback(message, mDeviceLostUserdata);
+ }
+ }
+
void Device::SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata) {
mErrorCallback = errorCallback;
mErrorUserdata = errorUserdata;
}
+ void Device::SetDeviceLostCallback(WGPUDeviceLostCallback callback, void* userdata) {
+ mDeviceLostCallback = callback;
+ mDeviceLostUserdata = userdata;
+ }
+
void Device::PushErrorScope(WGPUErrorFilter filter) {
mErrorScopeStackSize++;
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Device.h b/chromium/third_party/dawn/src/dawn_wire/client/Device.h
index 9c1bb2f9fa1..af5934e8256 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Device.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Device.h
@@ -32,7 +32,9 @@ namespace dawn_wire { namespace client {
Client* GetClient();
void HandleError(WGPUErrorType errorType, const char* message);
+ void HandleDeviceLost(const char* message);
void SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata);
+ void SetDeviceLostCallback(WGPUDeviceLostCallback errorCallback, void* errorUserdata);
void PushErrorScope(WGPUErrorFilter filter);
bool RequestPopErrorScope(WGPUErrorCallback callback, void* userdata);
@@ -49,7 +51,9 @@ namespace dawn_wire { namespace client {
Client* mClient = nullptr;
WGPUErrorCallback mErrorCallback = nullptr;
+ WGPUDeviceLostCallback mDeviceLostCallback = nullptr;
void* mErrorUserdata;
+ void* mDeviceLostUserdata;
};
}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ObjectStorage.h b/chromium/third_party/dawn/src/dawn_wire/server/ObjectStorage.h
index 55d6a0962aa..e98966dc4ec 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ObjectStorage.h
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ObjectStorage.h
@@ -94,10 +94,10 @@ namespace dawn_wire { namespace server {
}
// Allocates the data for a given ID and returns it.
- // Returns nullptr if the ID is already allocated, or too far ahead.
- // Invalidates all the Data*
+ // Returns nullptr if the ID is already allocated, or too far ahead, or if ID is 0 (ID 0 is
+ // reserved for nullptr). Invalidates all the Data*
Data* Allocate(uint32_t id) {
- if (id > mKnown.size()) {
+ if (id == 0 || id > mKnown.size()) {
return nullptr;
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp b/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
index d03980acf14..d05285df100 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
@@ -32,6 +32,7 @@ namespace dawn_wire { namespace server {
deviceData->handle = device;
mProcs.deviceSetUncapturedErrorCallback(device, ForwardUncapturedError, this);
+ mProcs.deviceSetDeviceLostCallback(device, ForwardDeviceLost, this);
}
Server::~Server() {
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.h b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
index 28f4c81fba1..effe69e25b1 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.h
+++ b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
@@ -62,6 +62,7 @@ namespace dawn_wire { namespace server {
// Forwarding callbacks
static void ForwardUncapturedError(WGPUErrorType type, const char* message, void* userdata);
+ static void ForwardDeviceLost(const char* message, void* userdata);
static void ForwardPopErrorScope(WGPUErrorType type, const char* message, void* userdata);
static void ForwardBufferMapReadAsync(WGPUBufferMapAsyncStatus status,
const void* ptr,
@@ -75,6 +76,7 @@ namespace dawn_wire { namespace server {
// Error callbacks
void OnUncapturedError(WGPUErrorType type, const char* message);
+ void OnDeviceLost(const char* message);
void OnDevicePopErrorScope(WGPUErrorType type,
const char* message,
ErrorScopeUserdata* userdata);
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
index 6f27867d2a7..66c0d70153d 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
@@ -21,6 +21,11 @@ namespace dawn_wire { namespace server {
server->OnUncapturedError(type, message);
}
+ void Server::ForwardDeviceLost(const char* message, void* userdata) {
+ auto server = static_cast<Server*>(userdata);
+ server->OnDeviceLost(message);
+ }
+
void Server::OnUncapturedError(WGPUErrorType type, const char* message) {
ReturnDeviceUncapturedErrorCallbackCmd cmd;
cmd.type = type;
@@ -31,6 +36,15 @@ namespace dawn_wire { namespace server {
cmd.Serialize(allocatedBuffer);
}
+ void Server::OnDeviceLost(const char* message) {
+ ReturnDeviceLostCallbackCmd cmd;
+ cmd.message = message;
+
+ size_t requiredSize = cmd.GetRequiredSize();
+ char* allocatedBuffer = static_cast<char*>(GetCmdSpace(requiredSize));
+ cmd.Serialize(allocatedBuffer);
+ }
+
bool Server::DoDevicePopErrorScope(WGPUDevice cDevice, uint64_t requestSerial) {
ErrorScopeUserdata* userdata = new ErrorScopeUserdata;
userdata->server = this;
diff --git a/chromium/third_party/dawn/src/fuzzers/BUILD.gn b/chromium/third_party/dawn/src/fuzzers/BUILD.gn
index 57bdcf3a4a9..525016d0aaa 100644
--- a/chromium/third_party/dawn/src/fuzzers/BUILD.gn
+++ b/chromium/third_party/dawn/src/fuzzers/BUILD.gn
@@ -79,6 +79,20 @@ static_library("dawn_spirv_cross_fuzzer_common") {
]
}
+static_library("dawn_wire_server_fuzzer_common") {
+ sources = [
+ "DawnWireServerFuzzer.cpp",
+ "DawnWireServerFuzzer.h",
+ ]
+ public_deps = [
+ "${dawn_root}/:libdawn_native_static",
+ "${dawn_root}/:libdawn_wire_static",
+ "${dawn_root}/src/common",
+ "${dawn_root}/src/dawn:dawncpp",
+ "${dawn_root}/src/dawn:libdawn_proc",
+ ]
+}
+
# TODO(rharrison): Remove asan_options once signal trap is no longer
# needed.
# Uses Dawn specific options and varies input data
@@ -118,17 +132,52 @@ dawn_fuzzer_test("dawn_spirv_cross_msl_fast_fuzzer") {
asan_options = [ "allow_user_segv_handler=1" ]
}
+dawn_fuzzer_test("dawn_spvc_glsl_fast_fuzzer") {
+ sources = [
+ "DawnSPVCglslFastFuzzer.cpp",
+ ]
+ deps = [
+ "${dawn_shaderc_dir}:libshaderc_spvc",
+ ]
+}
+
+dawn_fuzzer_test("dawn_spvc_hlsl_fast_fuzzer") {
+ sources = [
+ "DawnSPVChlslFastFuzzer.cpp",
+ ]
+ deps = [
+ "${dawn_shaderc_dir}:libshaderc_spvc",
+ ]
+}
+
+dawn_fuzzer_test("dawn_spvc_msl_fast_fuzzer") {
+ sources = [
+ "DawnSPVCmslFastFuzzer.cpp",
+ ]
+ deps = [
+ "${dawn_shaderc_dir}:libshaderc_spvc",
+ ]
+}
+
dawn_fuzzer_test("dawn_wire_server_and_frontend_fuzzer") {
sources = [
"DawnWireServerAndFrontendFuzzer.cpp",
]
deps = [
- "${dawn_root}/:libdawn_native_static",
- "${dawn_root}/:libdawn_wire_static",
- "${dawn_root}/src/common",
- "${dawn_root}/src/dawn:dawncpp",
- "${dawn_root}/src/dawn:libdawn_proc",
+ ":dawn_wire_server_fuzzer_common",
+ ]
+
+ additional_configs = [ "${dawn_root}/src/common:dawn_internal" ]
+}
+
+dawn_fuzzer_test("dawn_wire_server_and_vulkan_backend_fuzzer") {
+ sources = [
+ "DawnWireServerAndVulkanBackendFuzzer.cpp",
+ ]
+
+ deps = [
+ ":dawn_wire_server_fuzzer_common",
]
additional_configs = [ "${dawn_root}/src/common:dawn_internal" ]
diff --git a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
index de12d640fc8..0cef24c0026 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
@@ -30,11 +30,22 @@ namespace dawn_native { namespace d3d12 {
DAWN_NATIVE_EXPORT WGPUTextureFormat
GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
+ struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDXGISharedHandle : ExternalImageDescriptor {
+ public:
+ ExternalImageDescriptorDXGISharedHandle();
+
+ HANDLE sharedHandle;
+ uint64_t acquireMutexKey;
+ bool isSwapChainTexture = false;
+ };
+
+ DAWN_NATIVE_EXPORT uint64_t SetExternalMemoryReservation(WGPUDevice device,
+ uint64_t requestedReservationSize);
+
// Note: SharedHandle must be a handle to a texture object.
- DAWN_NATIVE_EXPORT WGPUTexture WrapSharedHandle(WGPUDevice device,
- const WGPUTextureDescriptor* descriptor,
- HANDLE sharedHandle,
- uint64_t acquireMutexKey);
+ DAWN_NATIVE_EXPORT WGPUTexture
+ WrapSharedHandle(WGPUDevice device, const ExternalImageDescriptorDXGISharedHandle* descriptor);
+
}} // namespace dawn_native::d3d12
#endif // DAWNNATIVE_D3D12BACKEND_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
index b9a1d0eaee4..0767e6bcd67 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
@@ -26,14 +26,20 @@ namespace dawn_platform {
class Platform;
} // namespace dawn_platform
+namespace wgpu {
+ struct AdapterProperties;
+}
+
namespace dawn_native {
+ // DEPRECATED: use WGPUAdapterProperties instead.
struct PCIInfo {
uint32_t deviceId = 0;
uint32_t vendorId = 0;
std::string name;
};
+ // DEPRECATED: use WGPUBackendType instead.
enum class BackendType {
D3D12,
Metal,
@@ -42,6 +48,7 @@ namespace dawn_native {
Vulkan,
};
+ // DEPRECATED: use WGPUAdapterType instead.
enum class DeviceType {
DiscreteGPU,
IntegratedGPU,
@@ -86,9 +93,15 @@ namespace dawn_native {
Adapter(AdapterBase* impl);
~Adapter();
+ // DEPRECATED: use GetProperties instead.
BackendType GetBackendType() const;
DeviceType GetDeviceType() const;
const PCIInfo& GetPCIInfo() const;
+
+ // Essentially webgpu.h's wgpuAdapterGetProperties while we don't have WGPUAdapter in
+ // dawn.json
+ void GetProperties(wgpu::AdapterProperties* properties) const;
+
std::vector<const char*> GetSupportedExtensions() const;
WGPUDeviceProperties GetAdapterProperties() const;
@@ -106,10 +119,10 @@ namespace dawn_native {
// Base class for options passed to Instance::DiscoverAdapters.
struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsBase {
public:
- const BackendType backendType;
+ const WGPUBackendType backendType;
protected:
- AdapterDiscoveryOptionsBase(BackendType type);
+ AdapterDiscoveryOptionsBase(WGPUBackendType type);
};
// Represents a connection to dawn_native and is used for dependency injection, discovering
@@ -140,14 +153,14 @@ namespace dawn_native {
// Enable backend's validation layers if it has.
void EnableBackendValidation(bool enableBackendValidation);
- bool IsBackendValidationEnabled() const;
// Enable debug capture on Dawn startup
void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
- bool IsBeginCaptureOnStartupEnabled() const;
void SetPlatform(dawn_platform::Platform* platform);
- dawn_platform::Platform* GetPlatform() const;
+
+ // Returns the underlying WGPUInstance object.
+ WGPUInstance Get() const;
private:
InstanceBase* mImpl = nullptr;
@@ -162,8 +175,41 @@ namespace dawn_native {
// Backdoor to get the number of lazy clears for testing
DAWN_NATIVE_EXPORT size_t GetLazyClearCountForTesting(WGPUDevice device);
+ // Query if texture has been initialized
+ DAWN_NATIVE_EXPORT bool IsTextureSubresourceInitialized(WGPUTexture texture,
+ uint32_t baseMipLevel,
+ uint32_t levelCount,
+ uint32_t baseArrayLayer,
+ uint32_t layerCount);
+
// Backdoor to get the order of the ProcMap for testing
DAWN_NATIVE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
+
+ // ErrorInjector functions used for testing only. Defined in dawn_native/ErrorInjector.cpp
+ DAWN_NATIVE_EXPORT void EnableErrorInjector();
+ DAWN_NATIVE_EXPORT void DisableErrorInjector();
+ DAWN_NATIVE_EXPORT void ClearErrorInjector();
+ DAWN_NATIVE_EXPORT uint64_t AcquireErrorInjectorCallCount();
+ DAWN_NATIVE_EXPORT void InjectErrorAt(uint64_t index);
+
+ // The different types of ExternalImageDescriptors
+ enum ExternalImageDescriptorType {
+ OpaqueFD,
+ DmaBuf,
+ IOSurface,
+ DXGISharedHandle,
+ };
+
+ // Common properties of external images
+ struct DAWN_NATIVE_EXPORT ExternalImageDescriptor {
+ public:
+ const ExternalImageDescriptorType type;
+ const WGPUTextureDescriptor* cTextureDescriptor; // Must match image creation params
+ bool isCleared; // Sets whether the texture will be cleared before use
+
+ protected:
+ ExternalImageDescriptor(ExternalImageDescriptorType type);
+ };
} // namespace dawn_native
#endif // DAWNNATIVE_DAWNNATIVE_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h b/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h
index 6e07c058240..90884ee7f24 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h
@@ -33,10 +33,16 @@ typedef __IOSurface* IOSurfaceRef;
#endif //__OBJC__
namespace dawn_native { namespace metal {
- DAWN_NATIVE_EXPORT WGPUTexture WrapIOSurface(WGPUDevice device,
- const WGPUTextureDescriptor* descriptor,
- IOSurfaceRef ioSurface,
- uint32_t plane);
+ struct DAWN_NATIVE_EXPORT ExternalImageDescriptorIOSurface : ExternalImageDescriptor {
+ public:
+ ExternalImageDescriptorIOSurface();
+
+ IOSurfaceRef ioSurface;
+ uint32_t plane;
+ };
+
+ DAWN_NATIVE_EXPORT WGPUTexture
+ WrapIOSurface(WGPUDevice device, const ExternalImageDescriptorIOSurface* descriptor);
// When making Metal interop with other APIs, we need to be careful that QueueSubmit doesn't
// mean that the operations will be visible to other APIs/Metal devices right away. macOS
diff --git a/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h b/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
index 30dbb05b848..b144e4ece76 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
@@ -23,32 +23,12 @@
#include <vector>
namespace dawn_native { namespace vulkan {
-
- // The different types of ExternalImageDescriptors
- enum ExternalImageDescriptorType {
-#ifdef __linux__
- OpaqueFD,
- DmaBuf,
-#endif // __linux__
- };
-
- // Common properties of external images
- struct DAWN_NATIVE_EXPORT ExternalImageDescriptor {
- public:
- const ExternalImageDescriptorType type; // Must match the subclass
- const WGPUTextureDescriptor* cTextureDescriptor; // Must match image creation params
- bool isCleared; // Sets whether the texture will be cleared before use
-
- protected:
- ExternalImageDescriptor(ExternalImageDescriptorType type);
- };
-
DAWN_NATIVE_EXPORT VkInstance GetInstance(WGPUDevice device);
DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName);
- DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
- VkSurfaceKHR surface);
+ DAWN_NATIVE_EXPORT DawnSwapChainImplementation
+ CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surface);
DAWN_NATIVE_EXPORT WGPUTextureFormat
GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h b/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
index 376151e7e87..3b9d8725cfe 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
@@ -44,8 +44,9 @@ namespace dawn_wire {
WireClient(const WireClientDescriptor& descriptor);
~WireClient();
+ static DawnProcTable GetProcs();
+
WGPUDevice GetDevice() const;
- DawnProcTable GetProcs() const;
const volatile char* HandleCommands(const volatile char* commands,
size_t size) override final;
diff --git a/chromium/third_party/dawn/src/utils/BackendBinding.cpp b/chromium/third_party/dawn/src/utils/BackendBinding.cpp
index 54aa6078dc4..4fe17b0f835 100644
--- a/chromium/third_party/dawn/src/utils/BackendBinding.cpp
+++ b/chromium/third_party/dawn/src/utils/BackendBinding.cpp
@@ -44,24 +44,13 @@ namespace utils {
: mWindow(window), mDevice(device) {
}
- void SetupGLFWWindowHintsForBackend(dawn_native::BackendType type) {
- if (type == dawn_native::BackendType::OpenGL) {
- glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
- glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
- glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
- glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
- } else {
- glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
- }
- }
-
void DiscoverAdapter(dawn_native::Instance* instance,
GLFWwindow* window,
- dawn_native::BackendType type) {
+ wgpu::BackendType type) {
DAWN_UNUSED(type);
DAWN_UNUSED(window);
- if (type == dawn_native::BackendType::OpenGL) {
+ if (type == wgpu::BackendType::OpenGL) {
#if defined(DAWN_ENABLE_BACKEND_OPENGL)
glfwMakeContextCurrent(window);
dawn_native::opengl::AdapterDiscoveryOptions adapterOptions;
@@ -73,32 +62,30 @@ namespace utils {
}
}
- BackendBinding* CreateBinding(dawn_native::BackendType type,
- GLFWwindow* window,
- WGPUDevice device) {
+ BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device) {
switch (type) {
#if defined(DAWN_ENABLE_BACKEND_D3D12)
- case dawn_native::BackendType::D3D12:
+ case wgpu::BackendType::D3D12:
return CreateD3D12Binding(window, device);
#endif
#if defined(DAWN_ENABLE_BACKEND_METAL)
- case dawn_native::BackendType::Metal:
+ case wgpu::BackendType::Metal:
return CreateMetalBinding(window, device);
#endif
#if defined(DAWN_ENABLE_BACKEND_NULL)
- case dawn_native::BackendType::Null:
+ case wgpu::BackendType::Null:
return CreateNullBinding(window, device);
#endif
#if defined(DAWN_ENABLE_BACKEND_OPENGL)
- case dawn_native::BackendType::OpenGL:
+ case wgpu::BackendType::OpenGL:
return CreateOpenGLBinding(window, device);
#endif
#if defined(DAWN_ENABLE_BACKEND_VULKAN)
- case dawn_native::BackendType::Vulkan:
+ case wgpu::BackendType::Vulkan:
return CreateVulkanBinding(window, device);
#endif
diff --git a/chromium/third_party/dawn/src/utils/BackendBinding.h b/chromium/third_party/dawn/src/utils/BackendBinding.h
index f8d35b09d6f..ca1c91ffb9d 100644
--- a/chromium/third_party/dawn/src/utils/BackendBinding.h
+++ b/chromium/third_party/dawn/src/utils/BackendBinding.h
@@ -15,7 +15,7 @@
#ifndef UTILS_BACKENDBINDING_H_
#define UTILS_BACKENDBINDING_H_
-#include "dawn/webgpu.h"
+#include "dawn/webgpu_cpp.h"
#include "dawn_native/DawnNative.h"
struct GLFWwindow;
@@ -36,13 +36,10 @@ namespace utils {
WGPUDevice mDevice = nullptr;
};
- void SetupGLFWWindowHintsForBackend(dawn_native::BackendType type);
void DiscoverAdapter(dawn_native::Instance* instance,
GLFWwindow* window,
- dawn_native::BackendType type);
- BackendBinding* CreateBinding(dawn_native::BackendType type,
- GLFWwindow* window,
- WGPUDevice device);
+ wgpu::BackendType type);
+ BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device);
} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/CMakeLists.txt b/chromium/third_party/dawn/src/utils/CMakeLists.txt
new file mode 100644
index 00000000000..3a959401a2b
--- /dev/null
+++ b/chromium/third_party/dawn/src/utils/CMakeLists.txt
@@ -0,0 +1,80 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_library(dawn_utils STATIC ${DAWN_DUMMY_FILE})
+target_sources(dawn_utils PRIVATE
+ "BackendBinding.cpp"
+ "BackendBinding.h"
+ "ComboRenderBundleEncoderDescriptor.cpp"
+ "ComboRenderBundleEncoderDescriptor.h"
+ "ComboRenderPipelineDescriptor.cpp"
+ "ComboRenderPipelineDescriptor.h"
+ "GLFWUtils.cpp"
+ "GLFWUtils.h"
+ "SystemUtils.cpp"
+ "SystemUtils.h"
+ "TerribleCommandBuffer.cpp"
+ "TerribleCommandBuffer.h"
+ "TextureFormatUtils.cpp"
+ "TextureFormatUtils.h"
+ "Timer.h"
+ "WGPUHelpers.cpp"
+ "WGPUHelpers.h"
+)
+target_link_libraries(dawn_utils
+ PUBLIC dawncpp_headers
+ PRIVATE dawn_internal_config
+ dawn_common
+ dawn_native
+ dawn_wire
+ shaderc
+ glfw
+)
+
+if(WIN32)
+ target_sources(dawn_utils PRIVATE "WindowsTimer.cpp")
+elseif(APPLE)
+ target_sources(dawn_utils PRIVATE
+ "OSXTimer.cpp"
+ "ObjCUtils.h"
+ "ObjCUtils.mm"
+ )
+ target_link_libraries(dawn_utils PRIVATE "-framework QuartzCore")
+elseif(UNIX)
+ target_sources(dawn_utils PRIVATE "PosixTimer.cpp")
+endif()
+
+if (DAWN_ENABLE_D3D12)
+ target_sources(dawn_utils PRIVATE "D3D12Binding.cpp")
+endif()
+
+if (DAWN_ENABLE_METAL)
+ target_sources(dawn_utils PRIVATE
+ "GLFWUtils_metal.mm"
+ "MetalBinding.mm"
+ )
+ target_link_libraries(dawn_utils PRIVATE "-framework Metal")
+endif()
+
+if (DAWN_ENABLE_NULL)
+ target_sources(dawn_utils PRIVATE "NullBinding.cpp")
+endif()
+
+if (DAWN_ENABLE_OPENGL)
+ target_sources(dawn_utils PRIVATE "OpenGLBinding.cpp")
+endif()
+
+if (DAWN_ENABLE_VULKAN)
+ target_sources(dawn_utils PRIVATE "VulkanBinding.cpp")
+endif()
diff --git a/chromium/third_party/dawn/src/utils/GLFWUtils.cpp b/chromium/third_party/dawn/src/utils/GLFWUtils.cpp
new file mode 100644
index 00000000000..fe9195ed6ae
--- /dev/null
+++ b/chromium/third_party/dawn/src/utils/GLFWUtils.cpp
@@ -0,0 +1,83 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "utils/GLFWUtils.h"
+
+#include "GLFW/glfw3.h"
+#include "common/Platform.h"
+
+#include <cstdlib>
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+# define GLFW_EXPOSE_NATIVE_WIN32
+#elif defined(DAWN_USE_X11)
+# define GLFW_EXPOSE_NATIVE_X11
+#endif
+#include "GLFW/glfw3native.h"
+
+namespace utils {
+
+ void SetupGLFWWindowHintsForBackend(wgpu::BackendType type) {
+ if (type == wgpu::BackendType::OpenGL) {
+ // Ask for OpenGL 4.4 which is what the GL backend requires for compute shaders and
+ // texture views.
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
+ glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
+ glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+ } else {
+ // Without this GLFW will initialize a GL context on the window, which prevents using
+ // the window with other APIs (by crashing in weird ways).
+ glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
+ }
+ }
+
+ wgpu::Surface CreateSurfaceForWindow(wgpu::Instance instance, GLFWwindow* window) {
+ std::unique_ptr<wgpu::ChainedStruct> chainedDescriptor =
+ SetupWindowAndGetSurfaceDescriptorForTesting(window);
+
+ wgpu::SurfaceDescriptor descriptor;
+ descriptor.nextInChain = chainedDescriptor.get();
+ wgpu::Surface surface = instance.CreateSurface(&descriptor);
+
+ return surface;
+ }
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+ std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(
+ GLFWwindow* window) {
+ std::unique_ptr<wgpu::SurfaceDescriptorFromWindowsHWND> desc =
+ std::make_unique<wgpu::SurfaceDescriptorFromWindowsHWND>();
+ desc->hwnd = glfwGetWin32Window(window);
+ desc->hinstance = GetModuleHandle(nullptr);
+ return desc;
+ }
+#elif defined(DAWN_USE_X11)
+ std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(
+ GLFWwindow* window) {
+ std::unique_ptr<wgpu::SurfaceDescriptorFromXlib> desc =
+ std::make_unique<wgpu::SurfaceDescriptorFromXlib>();
+ desc->display = glfwGetX11Display();
+ desc->window = glfwGetX11Window(window);
+ return desc;
+ }
+#elif defined(DAWN_ENABLE_BACKEND_METAL)
+ // SetupWindowAndGetSurfaceDescriptorForTesting defined in GLFWUtils_metal.mm
+#else
+ std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(GLFWwindow*) {
+ return nullptr;
+ }
+#endif
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/GLFWUtils.h b/chromium/third_party/dawn/src/utils/GLFWUtils.h
new file mode 100644
index 00000000000..f2299cba9d2
--- /dev/null
+++ b/chromium/third_party/dawn/src/utils/GLFWUtils.h
@@ -0,0 +1,42 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_GLFWUTILS_H_
+#define UTILS_GLFWUTILS_H_
+
+#include "dawn/webgpu_cpp.h"
+
+#include <memory>
+
+struct GLFWwindow;
+
+namespace utils {
+
+ // Adds all the necessary glfwWindowHint calls for the next GLFWwindow created to be used with
+ // the specified backend.
+ void SetupGLFWWindowHintsForBackend(wgpu::BackendType type);
+
+ // Does the necessary setup on the GLFWwindow to allow creating a wgpu::Surface with it and
+ // calls `instance.CreateSurface` with the correct descriptor for this window.
+ // Returns a null wgpu::Surface on failure.
+ wgpu::Surface CreateSurfaceForWindow(wgpu::Instance instance, GLFWwindow* window);
+
+ // Use for testing only. Does everything that CreateSurfaceForWindow does except the call to
+ // CreateSurface so the descriptor can be modified for testing.
+ std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(
+ GLFWwindow* window);
+
+} // namespace utils
+
+#endif // UTILS_GLFWUTILS_H_
diff --git a/chromium/third_party/dawn/src/utils/GLFWUtils_metal.mm b/chromium/third_party/dawn/src/utils/GLFWUtils_metal.mm
new file mode 100644
index 00000000000..ff0942885c5
--- /dev/null
+++ b/chromium/third_party/dawn/src/utils/GLFWUtils_metal.mm
@@ -0,0 +1,54 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#if !defined(DAWN_ENABLE_BACKEND_METAL)
+# error "GLFWUtils_metal.mm requires the Metal backend to be enabled."
+#endif // !defined(DAWN_ENABLE_BACKEND_METAL)
+
+#include "utils/GLFWUtils.h"
+
+#import <QuartzCore/CAMetalLayer.h>
+#include "GLFW/glfw3.h"
+
+#include <cstdlib>
+
+#define GLFW_EXPOSE_NATIVE_COCOA
+#include "GLFW/glfw3native.h"
+
+namespace utils {
+
+ std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorForTesting(
+ GLFWwindow* window) {
+ if (@available(macOS 10.11, *)) {
+ NSWindow* nsWindow = glfwGetCocoaWindow(window);
+ NSView* view = [nsWindow contentView];
+
+ // Create a CAMetalLayer that covers the whole window that will be passed to
+ // CreateSurface.
+ [view setWantsLayer:YES];
+ [view setLayer:[CAMetalLayer layer]];
+
+ // Use retina if the window was created with retina support.
+ [[view layer] setContentsScale:[nsWindow backingScaleFactor]];
+
+ std::unique_ptr<wgpu::SurfaceDescriptorFromMetalLayer> desc =
+ std::make_unique<wgpu::SurfaceDescriptorFromMetalLayer>();
+ desc->layer = [view layer];
+ return desc;
+ }
+
+ return nullptr;
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/ObjCUtils.h b/chromium/third_party/dawn/src/utils/ObjCUtils.h
new file mode 100644
index 00000000000..17b3956a165
--- /dev/null
+++ b/chromium/third_party/dawn/src/utils/ObjCUtils.h
@@ -0,0 +1,29 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_OBJCUTILS_H_
+#define UTILS_OBJCUTILS_H_
+
+// Contains helper function to manipulate ObjC objects. This helps having C++ files do a little bit
+// of ObjectiveC calls, when they cannot be converted to ObjectiveC++ because they are used on
+// multiple platforms.
+
+namespace utils {
+
+ // The returned CALayer is autoreleased.
+ void* CreateDummyCALayer();
+
+} // namespace utils
+
+#endif // UTILS_OBJCUTILS_H_
diff --git a/chromium/third_party/dawn/src/utils/ObjCUtils.mm b/chromium/third_party/dawn/src/utils/ObjCUtils.mm
new file mode 100644
index 00000000000..5eba147cb60
--- /dev/null
+++ b/chromium/third_party/dawn/src/utils/ObjCUtils.mm
@@ -0,0 +1,25 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "utils/ObjCUtils.h"
+
+#include <QuartzCore/CALayer.h>
+
+namespace utils {
+
+ void* CreateDummyCALayer() {
+ return [CALayer layer];
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/TextureFormatUtils.cpp b/chromium/third_party/dawn/src/utils/TextureFormatUtils.cpp
new file mode 100644
index 00000000000..634417cd12c
--- /dev/null
+++ b/chromium/third_party/dawn/src/utils/TextureFormatUtils.cpp
@@ -0,0 +1,89 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "TextureFormatUtils.h"
+
+namespace utils {
+ const char* GetColorTextureComponentTypePrefix(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RG11B10Float:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ return "";
+
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA32Uint:
+ return "u";
+
+ case wgpu::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ return "i";
+ default:
+ UNREACHABLE();
+ return "";
+ }
+ }
+
+ bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format) {
+ switch (format) {
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::RGBA32Float:
+ return true;
+ default:
+ return false;
+ }
+ }
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/TextureFormatUtils.h b/chromium/third_party/dawn/src/utils/TextureFormatUtils.h
new file mode 100644
index 00000000000..bbd5f0c1846
--- /dev/null
+++ b/chromium/third_party/dawn/src/utils/TextureFormatUtils.h
@@ -0,0 +1,58 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_TEXTURE_FORMAT_UTILS_H_
+#define UTILS_TEXTURE_FORMAT_UTILS_H_
+
+#include <array>
+
+#include <dawn/webgpu_cpp.h>
+
+#include "common/Assert.h"
+
+namespace utils {
+ static constexpr std::array<wgpu::TextureFormat, 52> kAllTextureFormats = {
+ wgpu::TextureFormat::R8Unorm, wgpu::TextureFormat::R8Snorm,
+ wgpu::TextureFormat::R8Uint, wgpu::TextureFormat::R8Sint,
+ wgpu::TextureFormat::R16Uint, wgpu::TextureFormat::R16Sint,
+ wgpu::TextureFormat::R16Float, wgpu::TextureFormat::RG8Unorm,
+ wgpu::TextureFormat::RG8Snorm, wgpu::TextureFormat::RG8Uint,
+ wgpu::TextureFormat::RG8Sint, wgpu::TextureFormat::R32Float,
+ wgpu::TextureFormat::R32Uint, wgpu::TextureFormat::R32Sint,
+ wgpu::TextureFormat::RG16Uint, wgpu::TextureFormat::RG16Sint,
+ wgpu::TextureFormat::RG16Float, wgpu::TextureFormat::RGBA8Unorm,
+ wgpu::TextureFormat::RGBA8UnormSrgb, wgpu::TextureFormat::RGBA8Snorm,
+ wgpu::TextureFormat::RGBA8Uint, wgpu::TextureFormat::RGBA8Sint,
+ wgpu::TextureFormat::BGRA8Unorm, wgpu::TextureFormat::BGRA8UnormSrgb,
+ wgpu::TextureFormat::RGB10A2Unorm, wgpu::TextureFormat::RG11B10Float,
+ wgpu::TextureFormat::RG32Float, wgpu::TextureFormat::RG32Uint,
+ wgpu::TextureFormat::RG32Sint, wgpu::TextureFormat::RGBA16Uint,
+ wgpu::TextureFormat::RGBA16Sint, wgpu::TextureFormat::RGBA16Float,
+ wgpu::TextureFormat::RGBA32Float, wgpu::TextureFormat::RGBA32Uint,
+ wgpu::TextureFormat::RGBA32Sint, wgpu::TextureFormat::Depth32Float,
+ wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Depth24PlusStencil8,
+ wgpu::TextureFormat::BC1RGBAUnorm, wgpu::TextureFormat::BC1RGBAUnormSrgb,
+ wgpu::TextureFormat::BC2RGBAUnorm, wgpu::TextureFormat::BC2RGBAUnormSrgb,
+ wgpu::TextureFormat::BC3RGBAUnorm, wgpu::TextureFormat::BC3RGBAUnormSrgb,
+ wgpu::TextureFormat::BC4RUnorm, wgpu::TextureFormat::BC4RSnorm,
+ wgpu::TextureFormat::BC5RGUnorm, wgpu::TextureFormat::BC5RGSnorm,
+ wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBSfloat,
+ wgpu::TextureFormat::BC7RGBAUnorm, wgpu::TextureFormat::BC7RGBAUnormSrgb,
+ };
+
+ const char* GetColorTextureComponentTypePrefix(wgpu::TextureFormat textureFormat);
+ bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format);
+} // namespace utils
+
+#endif
diff --git a/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp b/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
index dcf8a2baade..e56ef1b80c6 100644
--- a/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
+++ b/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
@@ -16,12 +16,12 @@
#include "common/Assert.h"
#include "common/Constants.h"
+#include "common/Log.h"
#include <shaderc/shaderc.hpp>
#include <cstring>
#include <iomanip>
-#include <iostream>
#include <sstream>
namespace utils {
@@ -67,7 +67,7 @@ namespace utils {
shaderc::Compiler compiler;
auto result = compiler.CompileGlslToSpv(source, strlen(source), kind, "myshader?");
if (result.GetCompilationStatus() != shaderc_compilation_status_success) {
- std::cerr << result.GetErrorMessage();
+ dawn::ErrorLog() << result.GetErrorMessage();
return {};
}
#ifdef DUMP_SPIRV_ASSEMBLY
@@ -106,7 +106,7 @@ namespace utils {
shaderc::Compiler compiler;
shaderc::SpvCompilationResult result = compiler.AssembleToSpv(source, strlen(source));
if (result.GetCompilationStatus() != shaderc_compilation_status_success) {
- std::cerr << result.GetErrorMessage();
+ dawn::ErrorLog() << result.GetErrorMessage();
return {};
}
diff --git a/chromium/third_party/dawn/third_party/BUILD.gn b/chromium/third_party/dawn/third_party/BUILD.gn
index 6ecb9cb76da..af046c876fa 100644
--- a/chromium/third_party/dawn/third_party/BUILD.gn
+++ b/chromium/third_party/dawn/third_party/BUILD.gn
@@ -27,18 +27,6 @@ config("khronos_headers_public") {
config("vulkan_headers_config") {
include_dirs = [ "khronos" ]
- if (is_win) {
- defines = [ "VK_USE_PLATFORM_WIN32_KHR" ]
- }
- if (is_linux && !is_chromeos) {
- defines = [ "VK_USE_PLATFORM_XCB_KHR" ]
- }
- if (is_android) {
- defines = [ "VK_USE_PLATFORM_ANDROID_KHR" ]
- }
- if (is_fuchsia) {
- defines = [ "VK_USE_PLATFORM_FUCHSIA" ]
- }
}
source_set("vulkan_headers") {
diff --git a/chromium/third_party/dawn/third_party/CMakeLists.txt b/chromium/third_party/dawn/third_party/CMakeLists.txt
new file mode 100644
index 00000000000..8d2a48ca605
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/CMakeLists.txt
@@ -0,0 +1,97 @@
+# Copyright 2020 The Dawn Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if (NOT TARGET SPIRV-Headers)
+ set(SPIRV_HEADERS_SKIP_EXAMPLES ON)
+ set(SPIRV_HEADERS_SKIP_INSTALL ON)
+
+ message(STATUS "Dawn: using SPIRV-Headers at ${DAWN_SPIRV_HEADERS_DIR}")
+ add_subdirectory(${DAWN_SPIRV_HEADERS_DIR})
+endif()
+
+if (NOT TARGET SPIRV-Tools)
+ set(SPIRV_SKIP_TESTS ON)
+ set(SPIRV_SKIP_EXECUTABLES ON)
+ set(SKIP_SPIRV_TOOLS_INSTALL ON)
+
+ message(STATUS "Dawn: using SPIRV-Tools at ${DAWN_SPIRV_TOOLS_DIR}")
+ add_subdirectory(${DAWN_SPIRV_TOOLS_DIR})
+endif()
+
+if (NOT TARGET glslang)
+ set(SKIP_GLSLANG_INSTALL ON)
+ set(ENABLE_SPVREMAPPER OFF)
+ set(ENABLE_GLSLANG_BINARIES OFF)
+ set(ENABLE_CTEST OFF)
+
+ message(STATUS "Dawn: using GLSLang at ${DAWN_GLSLANG_DIR}")
+ add_subdirectory(${DAWN_GLSLANG_DIR})
+endif()
+
+if (TARGET shaderc)
+ if (NOT TARGET shaderc_spvc)
+ message(FATAL_ERROR "Dawn: If shaderc is configured before Dawn, it must include SPVC")
+ endif()
+else()
+ set(SHADERC_SKIP_TESTS ON)
+ set(SHADERC_SKIP_INSTALL ON)
+ set(SHADERC_ENABLE_SPVC ON)
+
+ # Change the default value of SHADERC_ENABLE_SHARED_CRT to ON as that's what matches the
+ # CMake defaults better.
+ if(MSVC)
+ option(SHADERC_ENABLE_SHARED_CRT "Use the shared CRT instead of the static CRT" ON)
+ endif()
+
+ # Let SPVC's CMakeLists.txt deal with configuring SPIRV-Cross
+ set(SPIRV_CROSS_ENABLE_TESTS OFF)
+ set(SHADERC_SPIRV_CROSS_DIR "${DAWN_SPIRV_CROSS_DIR}")
+
+ message(STATUS "Dawn: using shaderc[_spvc] at ${DAWN_SHADERC_DIR}")
+ message(STATUS "Dawn: - with SPIRV-Cross at ${DAWN_SPIRV_CROSS_DIR}")
+ add_subdirectory(${DAWN_SHADERC_DIR})
+endif()
+
+if (DAWN_BUILD_EXAMPLES)
+ if (NOT TARGET glfw)
+ set(GLFW_BUILD_DOCS OFF)
+ set(GLFW_BUILD_TESTS OFF)
+ set(GLFW_BUILD_EXAMPLES OFF)
+
+ message(STATUS "Dawn: using GLFW at ${DAWN_GLFW_DIR}")
+ add_subdirectory(${DAWN_GLFW_DIR})
+ endif()
+
+ if (NOT TARGET glm)
+ message(STATUS "Dawn: using GLM at ${DAWN_GLM_DIR}")
+ add_subdirectory(${DAWN_GLM_DIR})
+ endif()
+endif()
+
+# Header-only library for khrplatform.h
+add_library(dawn_khronos_platform INTERFACE)
+target_sources(dawn_khronos_platform INTERFACE "${DAWN_THIRD_PARTY_DIR}/khronos/KHR/khrplatform.h")
+target_include_directories(dawn_khronos_platform INTERFACE "${DAWN_THIRD_PARTY_DIR}/khronos")
+
+# Header-only library for Vulkan headers
+add_library(dawn_vulkan_headers INTERFACE)
+target_sources(dawn_vulkan_headers INTERFACE
+ "${DAWN_THIRD_PARTY_DIR}/khronos/vulkan/vk_icd.h"
+ "${DAWN_THIRD_PARTY_DIR}/khronos/vulkan/vk_layer.h"
+ "${DAWN_THIRD_PARTY_DIR}/khronos/vulkan/vk_platform.h"
+ "${DAWN_THIRD_PARTY_DIR}/khronos/vulkan/vk_sdk_platform.h"
+ "${DAWN_THIRD_PARTY_DIR}/khronos/vulkan/vulkan.h"
+ "${DAWN_THIRD_PARTY_DIR}/khronos/vulkan/vulkan_core.h"
+)
+target_include_directories(dawn_vulkan_headers INTERFACE "${DAWN_THIRD_PARTY_DIR}/khronos")
diff --git a/chromium/third_party/dawn/third_party/khronos/vulkan/vk_icd.h b/chromium/third_party/dawn/third_party/khronos/vulkan/vk_icd.h
index a2d960a6324..5dff59a16ee 100644
--- a/chromium/third_party/dawn/third_party/khronos/vulkan/vk_icd.h
+++ b/chromium/third_party/dawn/third_party/khronos/vulkan/vk_icd.h
@@ -89,7 +89,8 @@ typedef enum {
VK_ICD_WSI_PLATFORM_MACOS,
VK_ICD_WSI_PLATFORM_IOS,
VK_ICD_WSI_PLATFORM_DISPLAY,
- VK_ICD_WSI_PLATFORM_HEADLESS
+ VK_ICD_WSI_PLATFORM_HEADLESS,
+ VK_ICD_WSI_PLATFORM_METAL,
} VkIcdWsiPlatform;
typedef struct {
@@ -172,4 +173,11 @@ typedef struct {
VkIcdSurfaceBase base;
} VkIcdSurfaceHeadless;
+#ifdef VK_USE_PLATFORM_METAL_EXT
+typedef struct {
+ VkIcdSurfaceBase base;
+ const CAMetalLayer *pLayer;
+} VkIcdSurfaceMetal;
+#endif // VK_USE_PLATFORM_METAL_EXT
+
#endif // VKICD_H
diff --git a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_android.h b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_android.h
index 1861802411e..9b8d3e276f8 100644
--- a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_android.h
+++ b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_android.h
@@ -1,10 +1,6 @@
#ifndef VULKAN_ANDROID_H_
#define VULKAN_ANDROID_H_ 1
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
@@ -27,6 +23,11 @@ extern "C" {
*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
#define VK_KHR_android_surface 1
struct ANativeWindow;
diff --git a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_core.h b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_core.h
index 623b0312f80..9770c3b9be7 100644
--- a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_core.h
+++ b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_core.h
@@ -1,10 +1,6 @@
#ifndef VULKAN_CORE_H_
#define VULKAN_CORE_H_ 1
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
@@ -27,6 +23,11 @@ extern "C" {
*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
#define VK_VERSION_1_0 1
#include "vk_platform.h"
@@ -43,7 +44,7 @@ extern "C" {
#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3ff)
#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xfff)
// Version of this file
-#define VK_HEADER_VERSION 115
+#define VK_HEADER_VERSION 125
#define VK_NULL_HANDLE 0
@@ -307,6 +308,7 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000,
VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000,
VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT = 1000066000,
VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT = 1000067000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT = 1000067001,
VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000,
@@ -441,11 +443,14 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT = 1000170000,
VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT = 1000170001,
VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = 1000174000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR = 1000175000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR = 1000177000,
VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT = 1000178000,
VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT = 1000178001,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT = 1000178002,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = 1000180000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR = 1000181000,
+ VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD = 1000183000,
VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT = 1000184000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000,
VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000,
@@ -467,6 +472,12 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV = 1000205002,
VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV = 1000206000,
VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV = 1000206001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR = 1000207000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR = 1000207001,
+ VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR = 1000207002,
+ VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR = 1000207003,
+ VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR = 1000207004,
+ VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR = 1000207005,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL = 1000209000,
VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL = 1000210000,
VK_STRUCTURE_TYPE_INITIALIZE_PERFORMANCE_API_INFO_INTEL = 1000210001,
@@ -484,6 +495,11 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT = 1000218001,
VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT = 1000218002,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT = 1000221000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT = 1000225000,
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = 1000225001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT = 1000225002,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD = 1000227000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD = 1000229000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT = 1000237000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT = 1000238000,
VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT = 1000238001,
@@ -507,7 +523,17 @@ typedef enum VkStructureType {
VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT = 1000255002,
VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT = 1000255001,
VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT = 1000256000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT = 1000259000,
+ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT = 1000259001,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT = 1000259002,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT = 1000261000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT = 1000265000,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR = 1000269000,
+ VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR = 1000269001,
+ VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR = 1000269002,
+ VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR = 1000269003,
+ VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR = 1000269004,
+ VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR = 1000269005,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT = 1000276000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT = 1000281000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT = 1000281001,
@@ -831,6 +857,20 @@ typedef enum VkFormat {
VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005,
VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006,
VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007,
+ VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT = 1000066000,
+ VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT = 1000066001,
+ VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT = 1000066002,
+ VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT = 1000066003,
+ VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT = 1000066004,
+ VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT = 1000066005,
+ VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT = 1000066006,
+ VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT = 1000066007,
+ VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT = 1000066008,
+ VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT = 1000066009,
+ VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT = 1000066010,
+ VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT = 1000066011,
+ VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT = 1000066012,
+ VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT = 1000066013,
VK_FORMAT_G8B8G8R8_422_UNORM_KHR = VK_FORMAT_G8B8G8R8_422_UNORM,
VK_FORMAT_B8G8R8G8_422_UNORM_KHR = VK_FORMAT_B8G8R8G8_422_UNORM,
VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM,
@@ -1177,6 +1217,7 @@ typedef enum VkDynamicState {
VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV = 1000164004,
VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV = 1000164006,
VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV = 1000205001,
+ VK_DYNAMIC_STATE_LINE_STIPPLE_EXT = 1000259000,
VK_DYNAMIC_STATE_BEGIN_RANGE = VK_DYNAMIC_STATE_VIEWPORT,
VK_DYNAMIC_STATE_END_RANGE = VK_DYNAMIC_STATE_STENCIL_REFERENCE,
VK_DYNAMIC_STATE_RANGE_SIZE = (VK_DYNAMIC_STATE_STENCIL_REFERENCE - VK_DYNAMIC_STATE_VIEWPORT + 1),
@@ -1209,6 +1250,7 @@ typedef enum VkSamplerAddressMode {
VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2,
VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3,
VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4,
+ VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE_KHR = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE,
VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE = VK_SAMPLER_ADDRESS_MODE_REPEAT,
VK_SAMPLER_ADDRESS_MODE_END_RANGE = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,
VK_SAMPLER_ADDRESS_MODE_RANGE_SIZE = (VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER - VK_SAMPLER_ADDRESS_MODE_REPEAT + 1),
@@ -1290,6 +1332,7 @@ typedef enum VkIndexType {
VK_INDEX_TYPE_UINT16 = 0,
VK_INDEX_TYPE_UINT32 = 1,
VK_INDEX_TYPE_NONE_NV = 1000165000,
+ VK_INDEX_TYPE_UINT8_EXT = 1000265000,
VK_INDEX_TYPE_BEGIN_RANGE = VK_INDEX_TYPE_UINT16,
VK_INDEX_TYPE_END_RANGE = VK_INDEX_TYPE_UINT32,
VK_INDEX_TYPE_RANGE_SIZE = (VK_INDEX_TYPE_UINT32 - VK_INDEX_TYPE_UINT16 + 1),
@@ -1474,6 +1517,8 @@ typedef enum VkMemoryPropertyFlagBits {
VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008,
VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010,
VK_MEMORY_PROPERTY_PROTECTED_BIT = 0x00000020,
+ VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD = 0x00000040,
+ VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD = 0x00000080,
VK_MEMORY_PROPERTY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkMemoryPropertyFlagBits;
typedef VkFlags VkMemoryPropertyFlags;
@@ -1627,6 +1672,10 @@ typedef enum VkImageViewCreateFlagBits {
VK_IMAGE_VIEW_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkImageViewCreateFlagBits;
typedef VkFlags VkImageViewCreateFlags;
+
+typedef enum VkShaderModuleCreateFlagBits {
+ VK_SHADER_MODULE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkShaderModuleCreateFlagBits;
typedef VkFlags VkShaderModuleCreateFlags;
typedef VkFlags VkPipelineCacheCreateFlags;
@@ -1637,11 +1686,19 @@ typedef enum VkPipelineCreateFlagBits {
VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008,
VK_PIPELINE_CREATE_DISPATCH_BASE = 0x00000010,
VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x00000020,
+ VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR = 0x00000040,
+ VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR = 0x00000080,
VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT,
VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE,
VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkPipelineCreateFlagBits;
typedef VkFlags VkPipelineCreateFlags;
+
+typedef enum VkPipelineShaderStageCreateFlagBits {
+ VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT = 0x00000001,
+ VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT = 0x00000002,
+ VK_PIPELINE_SHADER_STAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineShaderStageCreateFlagBits;
typedef VkFlags VkPipelineShaderStageCreateFlags;
typedef enum VkShaderStageFlagBits {
@@ -1720,6 +1777,10 @@ typedef enum VkFramebufferCreateFlagBits {
VK_FRAMEBUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkFramebufferCreateFlagBits;
typedef VkFlags VkFramebufferCreateFlags;
+
+typedef enum VkRenderPassCreateFlagBits {
+ VK_RENDER_PASS_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkRenderPassCreateFlagBits;
typedef VkFlags VkRenderPassCreateFlags;
typedef enum VkAttachmentDescriptionFlagBits {
@@ -1815,7 +1876,8 @@ typedef VkFlags VkCommandBufferResetFlags;
typedef enum VkStencilFaceFlagBits {
VK_STENCIL_FACE_FRONT_BIT = 0x00000001,
VK_STENCIL_FACE_BACK_BIT = 0x00000002,
- VK_STENCIL_FRONT_AND_BACK = 0x00000003,
+ VK_STENCIL_FACE_FRONT_AND_BACK = 0x00000003,
+ VK_STENCIL_FRONT_AND_BACK = VK_STENCIL_FACE_FRONT_AND_BACK,
VK_STENCIL_FACE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkStencilFaceFlagBits;
typedef VkFlags VkStencilFaceFlags;
@@ -5031,7 +5093,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHR(
#define VK_KHR_display 1
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR)
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR)
-#define VK_KHR_DISPLAY_SPEC_VERSION 21
+#define VK_KHR_DISPLAY_SPEC_VERSION 23
#define VK_KHR_DISPLAY_EXTENSION_NAME "VK_KHR_display"
typedef enum VkDisplayPlaneAlphaFlagBitsKHR {
@@ -5154,7 +5216,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR(
#define VK_KHR_display_swapchain 1
-#define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 9
+#define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 10
#define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME "VK_KHR_display_swapchain"
typedef struct VkDisplayPresentInfoKHR {
VkStructureType sType;
@@ -5177,7 +5239,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR(
#define VK_KHR_sampler_mirror_clamp_to_edge 1
-#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION 1
+#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION 3
#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME "VK_KHR_sampler_mirror_clamp_to_edge"
@@ -5193,7 +5255,7 @@ typedef VkPhysicalDeviceMultiviewProperties VkPhysicalDeviceMultiviewPropertiesK
#define VK_KHR_get_physical_device_properties2 1
-#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION 1
+#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION 2
#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_physical_device_properties2"
typedef VkPhysicalDeviceFeatures2 VkPhysicalDeviceFeatures2KHR;
@@ -5258,7 +5320,7 @@ VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR(
#define VK_KHR_device_group 1
-#define VK_KHR_DEVICE_GROUP_SPEC_VERSION 3
+#define VK_KHR_DEVICE_GROUP_SPEC_VERSION 4
#define VK_KHR_DEVICE_GROUP_EXTENSION_NAME "VK_KHR_device_group"
typedef VkPeerMemoryFeatureFlags VkPeerMemoryFeatureFlagsKHR;
@@ -6043,7 +6105,7 @@ typedef struct VkImageFormatListCreateInfoKHR {
#define VK_KHR_sampler_ycbcr_conversion 1
typedef VkSamplerYcbcrConversion VkSamplerYcbcrConversionKHR;
-#define VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION 1
+#define VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION 14
#define VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME "VK_KHR_sampler_ycbcr_conversion"
typedef VkSamplerYcbcrModelConversion VkSamplerYcbcrModelConversionKHR;
@@ -6147,6 +6209,17 @@ VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountKHR(
#endif
+#define VK_KHR_shader_subgroup_extended_types 1
+#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_SPEC_VERSION 1
+#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_EXTENSION_NAME "VK_KHR_shader_subgroup_extended_types"
+typedef struct VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 shaderSubgroupExtendedTypes;
+} VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR;
+
+
+
#define VK_KHR_8bit_storage 1
#define VK_KHR_8BIT_STORAGE_SPEC_VERSION 1
#define VK_KHR_8BIT_STORAGE_EXTENSION_NAME "VK_KHR_8bit_storage"
@@ -6172,6 +6245,18 @@ typedef struct VkPhysicalDeviceShaderAtomicInt64FeaturesKHR {
+#define VK_KHR_shader_clock 1
+#define VK_KHR_SHADER_CLOCK_SPEC_VERSION 1
+#define VK_KHR_SHADER_CLOCK_EXTENSION_NAME "VK_KHR_shader_clock"
+typedef struct VkPhysicalDeviceShaderClockFeaturesKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 shaderSubgroupClock;
+ VkBool32 shaderDeviceClock;
+} VkPhysicalDeviceShaderClockFeaturesKHR;
+
+
+
#define VK_KHR_driver_properties 1
#define VK_MAX_DRIVER_NAME_SIZE_KHR 256
#define VK_MAX_DRIVER_INFO_SIZE_KHR 256
@@ -6215,28 +6300,38 @@ typedef struct VkPhysicalDeviceDriverPropertiesKHR {
#define VK_KHR_shader_float_controls 1
-#define VK_KHR_SHADER_FLOAT_CONTROLS_SPEC_VERSION 1
+#define VK_KHR_SHADER_FLOAT_CONTROLS_SPEC_VERSION 4
#define VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME "VK_KHR_shader_float_controls"
+
+typedef enum VkShaderFloatControlsIndependenceKHR {
+ VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR = 0,
+ VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR = 1,
+ VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR = 2,
+ VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_BEGIN_RANGE_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR,
+ VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_END_RANGE_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR,
+ VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_RANGE_SIZE_KHR = (VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR - VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR + 1),
+ VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkShaderFloatControlsIndependenceKHR;
typedef struct VkPhysicalDeviceFloatControlsPropertiesKHR {
- VkStructureType sType;
- void* pNext;
- VkBool32 separateDenormSettings;
- VkBool32 separateRoundingModeSettings;
- VkBool32 shaderSignedZeroInfNanPreserveFloat16;
- VkBool32 shaderSignedZeroInfNanPreserveFloat32;
- VkBool32 shaderSignedZeroInfNanPreserveFloat64;
- VkBool32 shaderDenormPreserveFloat16;
- VkBool32 shaderDenormPreserveFloat32;
- VkBool32 shaderDenormPreserveFloat64;
- VkBool32 shaderDenormFlushToZeroFloat16;
- VkBool32 shaderDenormFlushToZeroFloat32;
- VkBool32 shaderDenormFlushToZeroFloat64;
- VkBool32 shaderRoundingModeRTEFloat16;
- VkBool32 shaderRoundingModeRTEFloat32;
- VkBool32 shaderRoundingModeRTEFloat64;
- VkBool32 shaderRoundingModeRTZFloat16;
- VkBool32 shaderRoundingModeRTZFloat32;
- VkBool32 shaderRoundingModeRTZFloat64;
+ VkStructureType sType;
+ void* pNext;
+ VkShaderFloatControlsIndependenceKHR denormBehaviorIndependence;
+ VkShaderFloatControlsIndependenceKHR roundingModeIndependence;
+ VkBool32 shaderSignedZeroInfNanPreserveFloat16;
+ VkBool32 shaderSignedZeroInfNanPreserveFloat32;
+ VkBool32 shaderSignedZeroInfNanPreserveFloat64;
+ VkBool32 shaderDenormPreserveFloat16;
+ VkBool32 shaderDenormPreserveFloat32;
+ VkBool32 shaderDenormPreserveFloat64;
+ VkBool32 shaderDenormFlushToZeroFloat16;
+ VkBool32 shaderDenormFlushToZeroFloat32;
+ VkBool32 shaderDenormFlushToZeroFloat64;
+ VkBool32 shaderRoundingModeRTEFloat16;
+ VkBool32 shaderRoundingModeRTEFloat32;
+ VkBool32 shaderRoundingModeRTEFloat64;
+ VkBool32 shaderRoundingModeRTZFloat16;
+ VkBool32 shaderRoundingModeRTZFloat32;
+ VkBool32 shaderRoundingModeRTZFloat64;
} VkPhysicalDeviceFloatControlsPropertiesKHR;
@@ -6278,6 +6373,89 @@ typedef struct VkPhysicalDeviceDepthStencilResolvePropertiesKHR {
#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME "VK_KHR_swapchain_mutable_format"
+#define VK_KHR_timeline_semaphore 1
+#define VK_KHR_TIMELINE_SEMAPHORE_SPEC_VERSION 2
+#define VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME "VK_KHR_timeline_semaphore"
+
+typedef enum VkSemaphoreTypeKHR {
+ VK_SEMAPHORE_TYPE_BINARY_KHR = 0,
+ VK_SEMAPHORE_TYPE_TIMELINE_KHR = 1,
+ VK_SEMAPHORE_TYPE_BEGIN_RANGE_KHR = VK_SEMAPHORE_TYPE_BINARY_KHR,
+ VK_SEMAPHORE_TYPE_END_RANGE_KHR = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
+ VK_SEMAPHORE_TYPE_RANGE_SIZE_KHR = (VK_SEMAPHORE_TYPE_TIMELINE_KHR - VK_SEMAPHORE_TYPE_BINARY_KHR + 1),
+ VK_SEMAPHORE_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkSemaphoreTypeKHR;
+
+typedef enum VkSemaphoreWaitFlagBitsKHR {
+ VK_SEMAPHORE_WAIT_ANY_BIT_KHR = 0x00000001,
+ VK_SEMAPHORE_WAIT_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkSemaphoreWaitFlagBitsKHR;
+typedef VkFlags VkSemaphoreWaitFlagsKHR;
+typedef struct VkPhysicalDeviceTimelineSemaphoreFeaturesKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 timelineSemaphore;
+} VkPhysicalDeviceTimelineSemaphoreFeaturesKHR;
+
+typedef struct VkPhysicalDeviceTimelineSemaphorePropertiesKHR {
+ VkStructureType sType;
+ void* pNext;
+ uint64_t maxTimelineSemaphoreValueDifference;
+} VkPhysicalDeviceTimelineSemaphorePropertiesKHR;
+
+typedef struct VkSemaphoreTypeCreateInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphoreTypeKHR semaphoreType;
+ uint64_t initialValue;
+} VkSemaphoreTypeCreateInfoKHR;
+
+typedef struct VkTimelineSemaphoreSubmitInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t waitSemaphoreValueCount;
+ const uint64_t* pWaitSemaphoreValues;
+ uint32_t signalSemaphoreValueCount;
+ const uint64_t* pSignalSemaphoreValues;
+} VkTimelineSemaphoreSubmitInfoKHR;
+
+typedef struct VkSemaphoreWaitInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphoreWaitFlagsKHR flags;
+ uint32_t semaphoreCount;
+ const VkSemaphore* pSemaphores;
+ const uint64_t* pValues;
+} VkSemaphoreWaitInfoKHR;
+
+typedef struct VkSemaphoreSignalInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkSemaphore semaphore;
+ uint64_t value;
+} VkSemaphoreSignalInfoKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValueKHR)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue);
+typedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphoresKHR)(VkDevice device, const VkSemaphoreWaitInfoKHR* pWaitInfo, uint64_t timeout);
+typedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphoreKHR)(VkDevice device, const VkSemaphoreSignalInfoKHR* pSignalInfo);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValueKHR(
+ VkDevice device,
+ VkSemaphore semaphore,
+ uint64_t* pValue);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphoresKHR(
+ VkDevice device,
+ const VkSemaphoreWaitInfoKHR* pWaitInfo,
+ uint64_t timeout);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphoreKHR(
+ VkDevice device,
+ const VkSemaphoreSignalInfoKHR* pSignalInfo);
+#endif
+
+
#define VK_KHR_vulkan_memory_model 1
#define VK_KHR_VULKAN_MEMORY_MODEL_SPEC_VERSION 3
#define VK_KHR_VULKAN_MEMORY_MODEL_EXTENSION_NAME "VK_KHR_vulkan_memory_model"
@@ -6291,6 +6469,11 @@ typedef struct VkPhysicalDeviceVulkanMemoryModelFeaturesKHR {
+#define VK_KHR_spirv_1_4 1
+#define VK_KHR_SPIRV_1_4_SPEC_VERSION 1
+#define VK_KHR_SPIRV_1_4_EXTENSION_NAME "VK_KHR_spirv_1_4"
+
+
#define VK_KHR_surface_protected_capabilities 1
#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_SPEC_VERSION 1
#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_EXTENSION_NAME "VK_KHR_surface_protected_capabilities"
@@ -6313,6 +6496,99 @@ typedef struct VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR {
+#define VK_KHR_pipeline_executable_properties 1
+#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_SPEC_VERSION 1
+#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME "VK_KHR_pipeline_executable_properties"
+
+typedef enum VkPipelineExecutableStatisticFormatKHR {
+ VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR = 0,
+ VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_INT64_KHR = 1,
+ VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR = 2,
+ VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR = 3,
+ VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BEGIN_RANGE_KHR = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR,
+ VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_END_RANGE_KHR = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR,
+ VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_RANGE_SIZE_KHR = (VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR - VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR + 1),
+ VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkPipelineExecutableStatisticFormatKHR;
+typedef struct VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 pipelineExecutableInfo;
+} VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR;
+
+typedef struct VkPipelineInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipeline pipeline;
+} VkPipelineInfoKHR;
+
+typedef struct VkPipelineExecutablePropertiesKHR {
+ VkStructureType sType;
+ void* pNext;
+ VkShaderStageFlags stages;
+ char name[VK_MAX_DESCRIPTION_SIZE];
+ char description[VK_MAX_DESCRIPTION_SIZE];
+ uint32_t subgroupSize;
+} VkPipelineExecutablePropertiesKHR;
+
+typedef struct VkPipelineExecutableInfoKHR {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipeline pipeline;
+ uint32_t executableIndex;
+} VkPipelineExecutableInfoKHR;
+
+typedef union VkPipelineExecutableStatisticValueKHR {
+ VkBool32 b32;
+ int64_t i64;
+ uint64_t u64;
+ double f64;
+} VkPipelineExecutableStatisticValueKHR;
+
+typedef struct VkPipelineExecutableStatisticKHR {
+ VkStructureType sType;
+ void* pNext;
+ char name[VK_MAX_DESCRIPTION_SIZE];
+ char description[VK_MAX_DESCRIPTION_SIZE];
+ VkPipelineExecutableStatisticFormatKHR format;
+ VkPipelineExecutableStatisticValueKHR value;
+} VkPipelineExecutableStatisticKHR;
+
+typedef struct VkPipelineExecutableInternalRepresentationKHR {
+ VkStructureType sType;
+ void* pNext;
+ char name[VK_MAX_DESCRIPTION_SIZE];
+ char description[VK_MAX_DESCRIPTION_SIZE];
+ VkBool32 isText;
+ size_t dataSize;
+ void* pData;
+} VkPipelineExecutableInternalRepresentationKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutablePropertiesKHR)(VkDevice device, const VkPipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VkPipelineExecutablePropertiesKHR* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableStatisticsKHR)(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VkPipelineExecutableStatisticKHR* pStatistics);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableInternalRepresentationsKHR)(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutablePropertiesKHR(
+ VkDevice device,
+ const VkPipelineInfoKHR* pPipelineInfo,
+ uint32_t* pExecutableCount,
+ VkPipelineExecutablePropertiesKHR* pProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableStatisticsKHR(
+ VkDevice device,
+ const VkPipelineExecutableInfoKHR* pExecutableInfo,
+ uint32_t* pStatisticCount,
+ VkPipelineExecutableStatisticKHR* pStatistics);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableInternalRepresentationsKHR(
+ VkDevice device,
+ const VkPipelineExecutableInfoKHR* pExecutableInfo,
+ uint32_t* pInternalRepresentationCount,
+ VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations);
+#endif
+
+
#define VK_EXT_debug_report 1
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT)
#define VK_EXT_DEBUG_REPORT_SPEC_VERSION 9
@@ -6659,7 +6935,7 @@ VKAPI_ATTR uint32_t VKAPI_CALL vkGetImageViewHandleNVX(
#define VK_AMD_draw_indirect_count 1
-#define VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION 1
+#define VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION 2
#define VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_AMD_draw_indirect_count"
typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
@@ -6836,7 +7112,7 @@ typedef struct VkExportMemoryAllocateInfoNV {
#define VK_EXT_validation_flags 1
-#define VK_EXT_VALIDATION_FLAGS_SPEC_VERSION 1
+#define VK_EXT_VALIDATION_FLAGS_SPEC_VERSION 2
#define VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME "VK_EXT_validation_flags"
typedef enum VkValidationCheckEXT {
@@ -6866,6 +7142,17 @@ typedef struct VkValidationFlagsEXT {
#define VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME "VK_EXT_shader_subgroup_vote"
+#define VK_EXT_texture_compression_astc_hdr 1
+#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_SPEC_VERSION 1
+#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_EXTENSION_NAME "VK_EXT_texture_compression_astc_hdr"
+typedef struct VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkBool32 textureCompressionASTC_HDR;
+} VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT;
+
+
+
#define VK_EXT_astc_decode_mode 1
#define VK_EXT_ASTC_DECODE_MODE_SPEC_VERSION 1
#define VK_EXT_ASTC_DECODE_MODE_EXTENSION_NAME "VK_EXT_astc_decode_mode"
@@ -6884,7 +7171,7 @@ typedef struct VkPhysicalDeviceASTCDecodeFeaturesEXT {
#define VK_EXT_conditional_rendering 1
-#define VK_EXT_CONDITIONAL_RENDERING_SPEC_VERSION 1
+#define VK_EXT_CONDITIONAL_RENDERING_SPEC_VERSION 2
#define VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME "VK_EXT_conditional_rendering"
typedef enum VkConditionalRenderingFlagBitsEXT {
@@ -7516,7 +7803,7 @@ typedef struct VkPipelineRasterizationDepthClipStateCreateInfoEXT {
#define VK_EXT_hdr_metadata 1
-#define VK_EXT_HDR_METADATA_SPEC_VERSION 1
+#define VK_EXT_HDR_METADATA_SPEC_VERSION 2
#define VK_EXT_HDR_METADATA_EXTENSION_NAME "VK_EXT_hdr_metadata"
typedef struct VkXYColorEXT {
float x;
@@ -7700,7 +7987,7 @@ VKAPI_ATTR void VKAPI_CALL vkSubmitDebugUtilsMessageEXT(
#define VK_EXT_sampler_filter_minmax 1
-#define VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION 1
+#define VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION 2
#define VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME "VK_EXT_sampler_filter_minmax"
typedef enum VkSamplerReductionModeEXT {
@@ -8283,6 +8570,15 @@ VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureNV)
#define VK_NV_RAY_TRACING_EXTENSION_NAME "VK_NV_ray_tracing"
#define VK_SHADER_UNUSED_NV (~0U)
+typedef enum VkAccelerationStructureTypeNV {
+ VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = 0,
+ VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = 1,
+ VK_ACCELERATION_STRUCTURE_TYPE_BEGIN_RANGE_NV = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV,
+ VK_ACCELERATION_STRUCTURE_TYPE_END_RANGE_NV = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV,
+ VK_ACCELERATION_STRUCTURE_TYPE_RANGE_SIZE_NV = (VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV - VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV + 1),
+ VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkAccelerationStructureTypeNV;
+
typedef enum VkRayTracingShaderGroupTypeNV {
VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV = 0,
VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV = 1,
@@ -8302,15 +8598,6 @@ typedef enum VkGeometryTypeNV {
VK_GEOMETRY_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
} VkGeometryTypeNV;
-typedef enum VkAccelerationStructureTypeNV {
- VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = 0,
- VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = 1,
- VK_ACCELERATION_STRUCTURE_TYPE_BEGIN_RANGE_NV = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV,
- VK_ACCELERATION_STRUCTURE_TYPE_END_RANGE_NV = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV,
- VK_ACCELERATION_STRUCTURE_TYPE_RANGE_SIZE_NV = (VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV - VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV + 1),
- VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
-} VkAccelerationStructureTypeNV;
-
typedef enum VkCopyAccelerationStructureModeNV {
VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV = 0,
VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV = 1,
@@ -8578,7 +8865,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCompileDeferredNV(
#define VK_NV_representative_fragment_test 1
-#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_SPEC_VERSION 1
+#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_SPEC_VERSION 2
#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_EXTENSION_NAME "VK_NV_representative_fragment_test"
typedef struct VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV {
VkStructureType sType;
@@ -8682,6 +8969,22 @@ VKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarkerAMD(
#endif
+#define VK_AMD_pipeline_compiler_control 1
+#define VK_AMD_PIPELINE_COMPILER_CONTROL_SPEC_VERSION 1
+#define VK_AMD_PIPELINE_COMPILER_CONTROL_EXTENSION_NAME "VK_AMD_pipeline_compiler_control"
+
+typedef enum VkPipelineCompilerControlFlagBitsAMD {
+ VK_PIPELINE_COMPILER_CONTROL_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF
+} VkPipelineCompilerControlFlagBitsAMD;
+typedef VkFlags VkPipelineCompilerControlFlagsAMD;
+typedef struct VkPipelineCompilerControlCreateInfoAMD {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCompilerControlFlagsAMD compilerControlFlags;
+} VkPipelineCompilerControlCreateInfoAMD;
+
+
+
#define VK_EXT_calibrated_timestamps 1
#define VK_EXT_CALIBRATED_TIMESTAMPS_SPEC_VERSION 1
#define VK_EXT_CALIBRATED_TIMESTAMPS_EXTENSION_NAME "VK_EXT_calibrated_timestamps"
@@ -8721,7 +9024,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetCalibratedTimestampsEXT(
#define VK_AMD_shader_core_properties 1
-#define VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION 1
+#define VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION 2
#define VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME "VK_AMD_shader_core_properties"
typedef struct VkPhysicalDeviceShaderCorePropertiesAMD {
VkStructureType sType;
@@ -8911,7 +9214,7 @@ typedef struct VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV {
#define VK_NV_shader_image_footprint 1
-#define VK_NV_SHADER_IMAGE_FOOTPRINT_SPEC_VERSION 1
+#define VK_NV_SHADER_IMAGE_FOOTPRINT_SPEC_VERSION 2
#define VK_NV_SHADER_IMAGE_FOOTPRINT_EXTENSION_NAME "VK_NV_shader_image_footprint"
typedef struct VkPhysicalDeviceShaderImageFootprintFeaturesNV {
VkStructureType sType;
@@ -9228,6 +9531,61 @@ typedef struct VkPhysicalDeviceScalarBlockLayoutFeaturesEXT {
#define VK_GOOGLE_DECORATE_STRING_EXTENSION_NAME "VK_GOOGLE_decorate_string"
+#define VK_EXT_subgroup_size_control 1
+#define VK_EXT_SUBGROUP_SIZE_CONTROL_SPEC_VERSION 2
+#define VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME "VK_EXT_subgroup_size_control"
+typedef struct VkPhysicalDeviceSubgroupSizeControlFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 subgroupSizeControl;
+ VkBool32 computeFullSubgroups;
+} VkPhysicalDeviceSubgroupSizeControlFeaturesEXT;
+
+typedef struct VkPhysicalDeviceSubgroupSizeControlPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t minSubgroupSize;
+ uint32_t maxSubgroupSize;
+ uint32_t maxComputeWorkgroupSubgroups;
+ VkShaderStageFlags requiredSubgroupSizeStages;
+} VkPhysicalDeviceSubgroupSizeControlPropertiesEXT;
+
+typedef struct VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t requiredSubgroupSize;
+} VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT;
+
+
+
+#define VK_AMD_shader_core_properties2 1
+#define VK_AMD_SHADER_CORE_PROPERTIES_2_SPEC_VERSION 1
+#define VK_AMD_SHADER_CORE_PROPERTIES_2_EXTENSION_NAME "VK_AMD_shader_core_properties2"
+
+typedef enum VkShaderCorePropertiesFlagBitsAMD {
+ VK_SHADER_CORE_PROPERTIES_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF
+} VkShaderCorePropertiesFlagBitsAMD;
+typedef VkFlags VkShaderCorePropertiesFlagsAMD;
+typedef struct VkPhysicalDeviceShaderCoreProperties2AMD {
+ VkStructureType sType;
+ void* pNext;
+ VkShaderCorePropertiesFlagsAMD shaderCoreFeatures;
+ uint32_t activeComputeUnitCount;
+} VkPhysicalDeviceShaderCoreProperties2AMD;
+
+
+
+#define VK_AMD_device_coherent_memory 1
+#define VK_AMD_DEVICE_COHERENT_MEMORY_SPEC_VERSION 1
+#define VK_AMD_DEVICE_COHERENT_MEMORY_EXTENSION_NAME "VK_AMD_device_coherent_memory"
+typedef struct VkPhysicalDeviceCoherentMemoryFeaturesAMD {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 deviceCoherentMemory;
+} VkPhysicalDeviceCoherentMemoryFeaturesAMD;
+
+
+
#define VK_EXT_memory_budget 1
#define VK_EXT_MEMORY_BUDGET_SPEC_VERSION 1
#define VK_EXT_MEMORY_BUDGET_EXTENSION_NAME "VK_EXT_memory_budget"
@@ -9315,15 +9673,16 @@ typedef struct VkImageStencilUsageCreateInfoEXT {
#define VK_EXT_validation_features 1
-#define VK_EXT_VALIDATION_FEATURES_SPEC_VERSION 1
+#define VK_EXT_VALIDATION_FEATURES_SPEC_VERSION 2
#define VK_EXT_VALIDATION_FEATURES_EXTENSION_NAME "VK_EXT_validation_features"
typedef enum VkValidationFeatureEnableEXT {
VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT = 0,
VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT = 1,
+ VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT = 2,
VK_VALIDATION_FEATURE_ENABLE_BEGIN_RANGE_EXT = VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT,
- VK_VALIDATION_FEATURE_ENABLE_END_RANGE_EXT = VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT,
- VK_VALIDATION_FEATURE_ENABLE_RANGE_SIZE_EXT = (VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT - VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT + 1),
+ VK_VALIDATION_FEATURE_ENABLE_END_RANGE_EXT = VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT,
+ VK_VALIDATION_FEATURE_ENABLE_RANGE_SIZE_EXT = (VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT - VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT + 1),
VK_VALIDATION_FEATURE_ENABLE_MAX_ENUM_EXT = 0x7FFFFFFF
} VkValidationFeatureEnableEXT;
@@ -9489,7 +9848,7 @@ typedef struct VkPhysicalDeviceYcbcrImageArraysFeaturesEXT {
#define VK_EXT_headless_surface 1
-#define VK_EXT_HEADLESS_SURFACE_SPEC_VERSION 0
+#define VK_EXT_HEADLESS_SURFACE_SPEC_VERSION 1
#define VK_EXT_HEADLESS_SURFACE_EXTENSION_NAME "VK_EXT_headless_surface"
typedef VkFlags VkHeadlessSurfaceCreateFlagsEXT;
typedef struct VkHeadlessSurfaceCreateInfoEXT {
@@ -9509,6 +9868,56 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreateHeadlessSurfaceEXT(
#endif
+#define VK_EXT_line_rasterization 1
+#define VK_EXT_LINE_RASTERIZATION_SPEC_VERSION 1
+#define VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME "VK_EXT_line_rasterization"
+
+typedef enum VkLineRasterizationModeEXT {
+ VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT = 0,
+ VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT = 1,
+ VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT = 2,
+ VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT = 3,
+ VK_LINE_RASTERIZATION_MODE_BEGIN_RANGE_EXT = VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT,
+ VK_LINE_RASTERIZATION_MODE_END_RANGE_EXT = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT,
+ VK_LINE_RASTERIZATION_MODE_RANGE_SIZE_EXT = (VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT - VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT + 1),
+ VK_LINE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkLineRasterizationModeEXT;
+typedef struct VkPhysicalDeviceLineRasterizationFeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 rectangularLines;
+ VkBool32 bresenhamLines;
+ VkBool32 smoothLines;
+ VkBool32 stippledRectangularLines;
+ VkBool32 stippledBresenhamLines;
+ VkBool32 stippledSmoothLines;
+} VkPhysicalDeviceLineRasterizationFeaturesEXT;
+
+typedef struct VkPhysicalDeviceLineRasterizationPropertiesEXT {
+ VkStructureType sType;
+ void* pNext;
+ uint32_t lineSubPixelPrecisionBits;
+} VkPhysicalDeviceLineRasterizationPropertiesEXT;
+
+typedef struct VkPipelineRasterizationLineStateCreateInfoEXT {
+ VkStructureType sType;
+ const void* pNext;
+ VkLineRasterizationModeEXT lineRasterizationMode;
+ VkBool32 stippledLineEnable;
+ uint32_t lineStippleFactor;
+ uint16_t lineStipplePattern;
+} VkPipelineRasterizationLineStateCreateInfoEXT;
+
+typedef void (VKAPI_PTR *PFN_vkCmdSetLineStippleEXT)(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetLineStippleEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t lineStippleFactor,
+ uint16_t lineStipplePattern);
+#endif
+
+
#define VK_EXT_host_query_reset 1
#define VK_EXT_HOST_QUERY_RESET_SPEC_VERSION 1
#define VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME "VK_EXT_host_query_reset"
@@ -9529,6 +9938,17 @@ VKAPI_ATTR void VKAPI_CALL vkResetQueryPoolEXT(
#endif
+#define VK_EXT_index_type_uint8 1
+#define VK_EXT_INDEX_TYPE_UINT8_SPEC_VERSION 1
+#define VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME "VK_EXT_index_type_uint8"
+typedef struct VkPhysicalDeviceIndexTypeUint8FeaturesEXT {
+ VkStructureType sType;
+ void* pNext;
+ VkBool32 indexTypeUint8;
+} VkPhysicalDeviceIndexTypeUint8FeaturesEXT;
+
+
+
#define VK_EXT_shader_demote_to_helper_invocation 1
#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_SPEC_VERSION 1
#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME "VK_EXT_shader_demote_to_helper_invocation"
@@ -9559,6 +9979,11 @@ typedef struct VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT {
} VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT;
+
+#define VK_GOOGLE_user_type 1
+#define VK_GOOGLE_USER_TYPE_SPEC_VERSION 1
+#define VK_GOOGLE_USER_TYPE_EXTENSION_NAME "VK_GOOGLE_user_type"
+
#ifdef __cplusplus
}
#endif
diff --git a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_fuchsia.h b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_fuchsia.h
index 4c62a7c2f76..81ebe55d313 100644
--- a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_fuchsia.h
+++ b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_fuchsia.h
@@ -1,10 +1,6 @@
#ifndef VULKAN_FUCHSIA_H_
#define VULKAN_FUCHSIA_H_ 1
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
@@ -27,6 +23,11 @@ extern "C" {
*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
#define VK_FUCHSIA_imagepipe_surface 1
#define VK_FUCHSIA_IMAGEPIPE_SURFACE_SPEC_VERSION 1
diff --git a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_ggp.h b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_ggp.h
index 3d67c4b8c1e..fd306131c32 100644
--- a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_ggp.h
+++ b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_ggp.h
@@ -1,10 +1,6 @@
#ifndef VULKAN_GGP_H_
#define VULKAN_GGP_H_ 1
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
@@ -27,6 +23,11 @@ extern "C" {
*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
#define VK_GGP_stream_descriptor_surface 1
#define VK_GGP_STREAM_DESCRIPTOR_SURFACE_SPEC_VERSION 1
diff --git a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_ios.h b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_ios.h
index 1846df52d5e..72ef1a8a825 100644
--- a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_ios.h
+++ b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_ios.h
@@ -1,10 +1,6 @@
#ifndef VULKAN_IOS_H_
#define VULKAN_IOS_H_ 1
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
@@ -27,6 +23,11 @@ extern "C" {
*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
#define VK_MVK_ios_surface 1
#define VK_MVK_IOS_SURFACE_SPEC_VERSION 2
diff --git a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_macos.h b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_macos.h
index dca623b042b..e6e5deaa366 100644
--- a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_macos.h
+++ b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_macos.h
@@ -1,10 +1,6 @@
#ifndef VULKAN_MACOS_H_
#define VULKAN_MACOS_H_ 1
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
@@ -27,6 +23,11 @@ extern "C" {
*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
#define VK_MVK_macos_surface 1
#define VK_MVK_MACOS_SURFACE_SPEC_VERSION 2
diff --git a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_metal.h b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_metal.h
index 16505237dfa..3dec68c7713 100644
--- a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_metal.h
+++ b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_metal.h
@@ -1,10 +1,6 @@
#ifndef VULKAN_METAL_H_
#define VULKAN_METAL_H_ 1
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
@@ -27,6 +23,11 @@ extern "C" {
*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
#define VK_EXT_metal_surface 1
diff --git a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_vi.h b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_vi.h
index 50aa27dfb92..6fb66f9dd28 100644
--- a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_vi.h
+++ b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_vi.h
@@ -1,10 +1,6 @@
#ifndef VULKAN_VI_H_
#define VULKAN_VI_H_ 1
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
@@ -27,6 +23,11 @@ extern "C" {
*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
#define VK_NN_vi_surface 1
#define VK_NN_VI_SURFACE_SPEC_VERSION 1
diff --git a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_wayland.h b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_wayland.h
index 12a5f045c1b..599d05b24a5 100644
--- a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_wayland.h
+++ b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_wayland.h
@@ -1,10 +1,6 @@
#ifndef VULKAN_WAYLAND_H_
#define VULKAN_WAYLAND_H_ 1
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
@@ -27,6 +23,11 @@ extern "C" {
*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
#define VK_KHR_wayland_surface 1
#define VK_KHR_WAYLAND_SURFACE_SPEC_VERSION 6
diff --git a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_win32.h b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_win32.h
index a61a7d885c2..20a1dc0e588 100644
--- a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_win32.h
+++ b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_win32.h
@@ -1,10 +1,6 @@
#ifndef VULKAN_WIN32_H_
#define VULKAN_WIN32_H_ 1
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
@@ -27,6 +23,11 @@ extern "C" {
*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
#define VK_KHR_win32_surface 1
#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 6
@@ -246,7 +247,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleNV(
#define VK_NV_win32_keyed_mutex 1
-#define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 1
+#define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 2
#define VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_NV_win32_keyed_mutex"
typedef struct VkWin32KeyedMutexAcquireReleaseInfoNV {
VkStructureType sType;
@@ -263,7 +264,7 @@ typedef struct VkWin32KeyedMutexAcquireReleaseInfoNV {
#define VK_EXT_full_screen_exclusive 1
-#define VK_EXT_FULL_SCREEN_EXCLUSIVE_SPEC_VERSION 3
+#define VK_EXT_FULL_SCREEN_EXCLUSIVE_SPEC_VERSION 4
#define VK_EXT_FULL_SCREEN_EXCLUSIVE_EXTENSION_NAME "VK_EXT_full_screen_exclusive"
typedef enum VkFullScreenExclusiveEXT {
diff --git a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xcb.h b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xcb.h
index 7d6905d2d61..4cc0bc0cec5 100644
--- a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xcb.h
+++ b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xcb.h
@@ -1,10 +1,6 @@
#ifndef VULKAN_XCB_H_
#define VULKAN_XCB_H_ 1
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
@@ -27,6 +23,11 @@ extern "C" {
*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
#define VK_KHR_xcb_surface 1
#define VK_KHR_XCB_SURFACE_SPEC_VERSION 6
diff --git a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xlib.h b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xlib.h
index 7a05d297df0..ee2b48accb0 100644
--- a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xlib.h
+++ b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xlib.h
@@ -1,10 +1,6 @@
#ifndef VULKAN_XLIB_H_
#define VULKAN_XLIB_H_ 1
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
@@ -27,6 +23,11 @@ extern "C" {
*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
#define VK_KHR_xlib_surface 1
#define VK_KHR_XLIB_SURFACE_SPEC_VERSION 6
diff --git a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xlib_xrandr.h b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xlib_xrandr.h
index 3a209530834..08c4fd729cd 100644
--- a/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xlib_xrandr.h
+++ b/chromium/third_party/dawn/third_party/khronos/vulkan/vulkan_xlib_xrandr.h
@@ -1,10 +1,6 @@
#ifndef VULKAN_XLIB_XRANDR_H_
#define VULKAN_XLIB_XRANDR_H_ 1
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
@@ -27,6 +23,11 @@ extern "C" {
*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
#define VK_EXT_acquire_xlib_display 1
#define VK_EXT_ACQUIRE_XLIB_DISPLAY_SPEC_VERSION 1