summaryrefslogtreecommitdiff
path: root/chromium/third_party/dawn/src
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2020-03-11 11:32:04 +0100
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2020-03-18 13:40:17 +0000
commit31ccca0778db85c159634478b4ec7997f6704860 (patch)
tree3d33fc3afd9d5ec95541e1bbe074a9cf8da12a0e /chromium/third_party/dawn/src
parent248b70b82a40964d5594eb04feca0fa36716185d (diff)
downloadqtwebengine-chromium-31ccca0778db85c159634478b4ec7997f6704860.tar.gz
BASELINE: Update Chromium to 80.0.3987.136
Change-Id: I98e1649aafae85ba3a83e67af00bb27ef301db7b Reviewed-by: Jüri Valdmann <juri.valdmann@qt.io>
Diffstat (limited to 'chromium/third_party/dawn/src')
-rw-r--r--chromium/third_party/dawn/src/common/BUILD.gn4
-rw-r--r--chromium/third_party/dawn/src/common/Math.cpp2
-rw-r--r--chromium/third_party/dawn/src/common/Math.h2
-rw-r--r--chromium/third_party/dawn/src/common/Platform.h3
-rw-r--r--chromium/third_party/dawn/src/common/SwapChainUtils.h2
-rw-r--r--chromium/third_party/dawn/src/common/SystemUtils.cpp117
-rw-r--r--chromium/third_party/dawn/src/common/SystemUtils.h27
-rw-r--r--chromium/third_party/dawn/src/dawn/BUILD.gn11
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Adapter.cpp7
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Adapter.h1
-rw-r--r--chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp10
-rw-r--r--chromium/third_party/dawn/src/dawn_native/AttachmentState.h15
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroup.cpp42
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroup.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h86
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp40
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h17
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h27
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp53
-rw-r--r--chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.h33
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Buffer.cpp56
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Buffer.h32
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CachedObject.cpp (renamed from chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryVk.cpp)13
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CachedObject.h41
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp3
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandBuffer.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp7
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp199
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandEncoder.h16
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp165
-rw-r--r--chromium/third_party/dawn/src/dawn_native/CommandValidation.h12
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Commands.cpp11
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Commands.h12
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp36
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h28
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp15
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ComputePipeline.h5
-rw-r--r--chromium/third_party/dawn/src/dawn_native/DawnNative.cpp12
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Device.cpp166
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Device.h39
-rw-r--r--chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp36
-rw-r--r--chromium/third_party/dawn/src/dawn_native/DynamicUploader.h9
-rw-r--r--chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp26
-rw-r--r--chromium/third_party/dawn/src/dawn_native/EncodingContext.h20
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ErrorData.cpp10
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ErrorData.h8
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp30
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ErrorScope.h14
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Extensions.cpp12
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Extensions.h1
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Fence.cpp36
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Fence.h14
-rw-r--r--chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.cpp2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.h6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Format.cpp165
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Format.h11
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Forward.h10
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ObjectBase.cpp10
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ObjectBase.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h8
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp75
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h16
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PerStage.cpp6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PerStage.h18
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Pipeline.cpp48
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Pipeline.h13
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp141
-rw-r--r--chromium/third_party/dawn/src/dawn_native/PipelineLayout.h14
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp95
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Queue.cpp17
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Queue.h6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RefCounted.cpp29
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RefCounted.h7
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderBundle.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp57
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h13
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp8
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp67
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h29
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp334
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RenderPipeline.h61
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ResourceHeapAllocator.h (renamed from chromium/third_party/dawn/src/dawn_native/MemoryAllocator.h)22
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.cpp18
-rw-r--r--chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h24
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Sampler.cpp14
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Sampler.h23
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp127
-rw-r--r--chromium/third_party/dawn/src/dawn_native/ShaderModule.h24
-rw-r--r--chromium/third_party/dawn/src/dawn_native/SwapChain.cpp60
-rw-r--r--chromium/third_party/dawn/src/dawn_native/SwapChain.h23
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Texture.cpp80
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Texture.h22
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Toggles.cpp22
-rw-r--r--chromium/third_party/dawn/src/dawn_native/Toggles.h5
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp12
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp36
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp63
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp414
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h21
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp50
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h12
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.cpp52
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.h47
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp23
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp7
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h3
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp42
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.cpp19
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp144
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h26
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.cpp56
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.h (renamed from chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocator.h)29
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp25
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h35
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp24
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.h7
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp22
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp11
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.h5
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp15
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp232
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.h89
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp182
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocator.cpp77
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp268
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h72
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp5
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h7
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.cpp20
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp92
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h8
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp18
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp14
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp244
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h25
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp18
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/d3d12/d3d12_platform.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/dawn_platform.h10
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm10
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm88
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm33
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/MetalBackend.mm10
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm12
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm7
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm220
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm20
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h8
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm98
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm158
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm18
-rw-r--r--chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp38
-rw-r--r--chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h10
-rw-r--r--chromium/third_party/dawn/src/dawn_native/null/NullBackend.cpp2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp232
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.cpp104
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.cpp10
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.h6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/OpenGLBackend.cpp8
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp18
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp12
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp108
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.cpp28
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp122
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp34
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.cpp22
-rw-r--r--chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp27
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp174
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h40
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp56
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h7
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp70
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h5
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp178
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetService.cpp41
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetService.h53
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp102
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h19
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/MemoryAllocator.cpp139
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/MemoryAllocator.h59
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.cpp119
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp12
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.h8
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp5
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp16
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h20
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp223
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h8
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.cpp31
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.h (renamed from chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryVk.h)15
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp243
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.h (renamed from chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.h)29
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp20
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h3
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp25
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp6
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp343
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h38
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp18
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp53
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp14
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp8
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryService.h36
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp271
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceNull.cpp33
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp62
-rw-r--r--chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp62
-rw-r--r--chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.h6
-rw-r--r--chromium/third_party/dawn/src/dawn_platform/tracing/TraceEvent.h44
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/WireClient.cpp8
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/WireServer.cpp2
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp96
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Buffer.h8
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Client.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Client.h8
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp27
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Device.cpp26
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Device.h16
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Fence.cpp4
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/Fence.h4
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/client/ObjectBase.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ObjectStorage.h2
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/Server.cpp6
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/Server.h24
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp28
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp10
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ServerFence.cpp6
-rw-r--r--chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp2
-rw-r--r--chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h46
-rw-r--r--chromium/third_party/dawn/src/include/dawn/dawn_export.h36
-rw-r--r--chromium/third_party/dawn/src/include/dawn/dawn_proc.h4
-rw-r--r--chromium/third_party/dawn/src/include/dawn/dawn_wsi.h10
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h14
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/DawnNative.h9
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h8
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h4
-rw-r--r--chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h71
-rw-r--r--chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h9
-rw-r--r--chromium/third_party/dawn/src/include/dawn_wire/Wire.h12
-rw-r--r--chromium/third_party/dawn/src/include/dawn_wire/WireClient.h10
-rw-r--r--chromium/third_party/dawn/src/include/dawn_wire/WireServer.h4
-rw-r--r--chromium/third_party/dawn/src/utils/BackendBinding.cpp14
-rw-r--r--chromium/third_party/dawn/src/utils/BackendBinding.h10
-rw-r--r--chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.cpp4
-rw-r--r--chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.h6
-rw-r--r--chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp77
-rw-r--r--chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h24
-rw-r--r--chromium/third_party/dawn/src/utils/D3D12Binding.cpp6
-rw-r--r--chromium/third_party/dawn/src/utils/MetalBinding.mm16
-rw-r--r--chromium/third_party/dawn/src/utils/NullBinding.cpp8
-rw-r--r--chromium/third_party/dawn/src/utils/OpenGLBinding.cpp6
-rw-r--r--chromium/third_party/dawn/src/utils/TerribleCommandBuffer.cpp53
-rw-r--r--chromium/third_party/dawn/src/utils/TerribleCommandBuffer.h6
-rw-r--r--chromium/third_party/dawn/src/utils/VulkanBinding.cpp6
-rw-r--r--chromium/third_party/dawn/src/utils/WGPUHelpers.cpp (renamed from chromium/third_party/dawn/src/utils/DawnHelpers.cpp)124
-rw-r--r--chromium/third_party/dawn/src/utils/WGPUHelpers.h (renamed from chromium/third_party/dawn/src/utils/DawnHelpers.h)78
292 files changed, 6828 insertions, 4333 deletions
diff --git a/chromium/third_party/dawn/src/common/BUILD.gn b/chromium/third_party/dawn/src/common/BUILD.gn
index c362b4c54e8..585876d821a 100644
--- a/chromium/third_party/dawn/src/common/BUILD.gn
+++ b/chromium/third_party/dawn/src/common/BUILD.gn
@@ -79,7 +79,7 @@ config("dawn_internal") {
# This GN file is discovered by all Chromium builds, but common doesn't support
# all of Chromium's OSes so we explicitly make the target visible only on
# systems we know Dawn is able to compile on.
-if (is_win || is_linux || is_mac || is_fuchsia) {
+if (is_win || is_linux || is_mac || is_fuchsia || is_android) {
static_library("common") {
sources = [
"Assert.cpp",
@@ -100,6 +100,8 @@ if (is_win || is_linux || is_mac || is_fuchsia) {
"SerialQueue.h",
"SerialStorage.h",
"SwapChainUtils.h",
+ "SystemUtils.cpp",
+ "SystemUtils.h",
"vulkan_platform.h",
"windows_with_undefs.h",
"xlib_with_undefs.h",
diff --git a/chromium/third_party/dawn/src/common/Math.cpp b/chromium/third_party/dawn/src/common/Math.cpp
index 1eeeb79cfd3..a8823e54293 100644
--- a/chromium/third_party/dawn/src/common/Math.cpp
+++ b/chromium/third_party/dawn/src/common/Math.cpp
@@ -74,7 +74,7 @@ uint64_t NextPowerOfTwo(uint64_t n) {
#endif
}
-bool IsPowerOfTwo(size_t n) {
+bool IsPowerOfTwo(uint64_t n) {
ASSERT(n != 0);
return (n & (n - 1)) == 0;
}
diff --git a/chromium/third_party/dawn/src/common/Math.h b/chromium/third_party/dawn/src/common/Math.h
index 80b8792eb1c..ac40dd96724 100644
--- a/chromium/third_party/dawn/src/common/Math.h
+++ b/chromium/third_party/dawn/src/common/Math.h
@@ -26,7 +26,7 @@
uint32_t ScanForward(uint32_t bits);
uint32_t Log2(uint32_t value);
uint32_t Log2(uint64_t value);
-bool IsPowerOfTwo(size_t n);
+bool IsPowerOfTwo(uint64_t n);
uint64_t NextPowerOfTwo(uint64_t n);
bool IsPtrAligned(const void* ptr, size_t alignment);
diff --git a/chromium/third_party/dawn/src/common/Platform.h b/chromium/third_party/dawn/src/common/Platform.h
index f6d7fc5d056..bc64db26d8f 100644
--- a/chromium/third_party/dawn/src/common/Platform.h
+++ b/chromium/third_party/dawn/src/common/Platform.h
@@ -21,6 +21,9 @@
#elif defined(__linux__)
# define DAWN_PLATFORM_LINUX 1
# define DAWN_PLATFORM_POSIX 1
+# if defined(__ANDROID__)
+# define DAWN_PLATFORM_ANDROID 1
+# endif
#elif defined(__APPLE__)
# define DAWN_PLATFORM_APPLE 1
diff --git a/chromium/third_party/dawn/src/common/SwapChainUtils.h b/chromium/third_party/dawn/src/common/SwapChainUtils.h
index af62d799619..c1ad5f2e62a 100644
--- a/chromium/third_party/dawn/src/common/SwapChainUtils.h
+++ b/chromium/third_party/dawn/src/common/SwapChainUtils.h
@@ -26,7 +26,7 @@ DawnSwapChainImplementation CreateSwapChainImplementation(T* swapChain) {
reinterpret_cast<T*>(userData)->Init(ctx);
};
impl.Destroy = [](void* userData) { delete reinterpret_cast<T*>(userData); };
- impl.Configure = [](void* userData, DawnTextureFormat format, DawnTextureUsage allowedUsage,
+ impl.Configure = [](void* userData, WGPUTextureFormat format, WGPUTextureUsage allowedUsage,
uint32_t width, uint32_t height) {
return static_cast<T*>(userData)->Configure(format, allowedUsage, width, height);
};
diff --git a/chromium/third_party/dawn/src/common/SystemUtils.cpp b/chromium/third_party/dawn/src/common/SystemUtils.cpp
new file mode 100644
index 00000000000..88fc7d7faa9
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/SystemUtils.cpp
@@ -0,0 +1,117 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "common/SystemUtils.h"
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+# include <Windows.h>
+# include <vector>
+#elif defined(DAWN_PLATFORM_LINUX)
+# include <limits.h>
+# include <unistd.h>
+# include <cstdlib>
+#elif defined(DAWN_PLATFORM_MACOS)
+# include <mach-o/dyld.h>
+# include <vector>
+#endif
+
+#include <array>
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+const char* GetPathSeparator() {
+ return "\\";
+}
+
+std::string GetEnvironmentVar(const char* variableName) {
+ // First pass a size of 0 to get the size of variable value.
+ char* tempBuf = nullptr;
+ DWORD result = GetEnvironmentVariableA(variableName, tempBuf, 0);
+ if (result == 0) {
+ return "";
+ }
+
+ // Then get variable value with its actual size.
+ std::vector<char> buffer(result + 1);
+ if (GetEnvironmentVariableA(variableName, buffer.data(), static_cast<DWORD>(buffer.size())) ==
+ 0) {
+ return "";
+ }
+ return std::string(buffer.data());
+}
+
+bool SetEnvironmentVar(const char* variableName, const char* value) {
+ return SetEnvironmentVariableA(variableName, value) == TRUE;
+}
+#elif defined(DAWN_PLATFORM_POSIX)
+const char* GetPathSeparator() {
+ return "/";
+}
+
+std::string GetEnvironmentVar(const char* variableName) {
+ char* value = getenv(variableName);
+ return value == nullptr ? "" : std::string(value);
+}
+
+bool SetEnvironmentVar(const char* variableName, const char* value) {
+ return setenv(variableName, value, 1) == 0;
+}
+#else
+# error "Implement Get/SetEnvironmentVar for your platform."
+#endif
+
+#if defined(DAWN_PLATFORM_WINDOWS)
+std::string GetExecutablePath() {
+ std::array<char, MAX_PATH> executableFileBuf;
+ DWORD executablePathLen = GetModuleFileNameA(nullptr, executableFileBuf.data(),
+ static_cast<DWORD>(executableFileBuf.size()));
+ return executablePathLen > 0 ? std::string(executableFileBuf.data()) : "";
+}
+#elif defined(DAWN_PLATFORM_LINUX)
+std::string GetExecutablePath() {
+ std::array<char, PATH_MAX> path;
+ ssize_t result = readlink("/proc/self/exe", path.data(), PATH_MAX - 1);
+ if (result < 0 || static_cast<size_t>(result) >= PATH_MAX - 1) {
+ return "";
+ }
+
+ path[result] = '\0';
+ return path.data();
+}
+#elif defined(DAWN_PLATFORM_MACOS)
+std::string GetExecutablePath() {
+ uint32_t size = 0;
+ _NSGetExecutablePath(nullptr, &size);
+
+ std::vector<char> buffer(size + 1);
+ if (_NSGetExecutablePath(buffer.data(), &size) != 0) {
+ return "";
+ }
+
+ buffer[size] = '\0';
+ return buffer.data();
+}
+#elif defined(DAWN_PLATFORM_FUCHSIA)
+std::string GetExecutablePath() {
+ // TODO: Implement on Fuchsia
+ return "";
+}
+#else
+# error "Implement GetExecutablePath for your platform."
+#endif
+
+std::string GetExecutableDirectory() {
+ std::string exePath = GetExecutablePath();
+ size_t lastPathSepLoc = exePath.find_last_of(GetPathSeparator());
+ return lastPathSepLoc != std::string::npos ? exePath.substr(0, lastPathSepLoc + 1) : "";
+}
diff --git a/chromium/third_party/dawn/src/common/SystemUtils.h b/chromium/third_party/dawn/src/common/SystemUtils.h
new file mode 100644
index 00000000000..2edf1e3a257
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/SystemUtils.h
@@ -0,0 +1,27 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_SYSTEMUTILS_H_
+#define COMMON_SYSTEMUTILS_H_
+
+#include "common/Platform.h"
+
+#include <string>
+
+const char* GetPathSeparator();
+std::string GetEnvironmentVar(const char* variableName);
+bool SetEnvironmentVar(const char* variableName, const char* value);
+std::string GetExecutableDirectory();
+
+#endif // COMMON_SYSTEMUTILS_H_
diff --git a/chromium/third_party/dawn/src/dawn/BUILD.gn b/chromium/third_party/dawn/src/dawn/BUILD.gn
index b0c449e8459..2ee0996b9f6 100644
--- a/chromium/third_party/dawn/src/dawn/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn/BUILD.gn
@@ -34,6 +34,7 @@ dawn_json_generator("dawn_headers_gen") {
outputs = [
"src/include/dawn/dawn.h",
"src/include/dawn/dawn_proc_table.h",
+ "src/include/dawn/webgpu.h",
]
}
@@ -44,10 +45,7 @@ source_set("dawn_headers") {
]
sources = get_target_outputs(":dawn_headers_gen")
- sources += [
- "${dawn_root}/src/include/dawn/dawn_export.h",
- "${dawn_root}/src/include/dawn/dawn_wsi.h",
- ]
+ sources += [ "${dawn_root}/src/include/dawn/dawn_wsi.h" ]
}
###############################################################################
@@ -58,6 +56,7 @@ dawn_json_generator("dawncpp_headers_gen") {
target = "dawncpp_headers"
outputs = [
"src/include/dawn/dawncpp.h",
+ "src/include/dawn/webgpu_cpp.h",
]
}
@@ -78,7 +77,7 @@ source_set("dawncpp_headers") {
dawn_json_generator("dawncpp_gen") {
target = "dawncpp"
outputs = [
- "src/dawn/dawncpp.cpp",
+ "src/dawn/webgpu_cpp.cpp",
]
}
@@ -102,7 +101,7 @@ dawn_json_generator("libdawn_proc_gen") {
}
dawn_component("libdawn_proc") {
- DEFINE_PREFIX = "DAWN"
+ DEFINE_PREFIX = "WGPU"
public_deps = [
":dawn_headers",
diff --git a/chromium/third_party/dawn/src/dawn_native/Adapter.cpp b/chromium/third_party/dawn/src/dawn_native/Adapter.cpp
index 362b540c0bd..02f09663998 100644
--- a/chromium/third_party/dawn/src/dawn_native/Adapter.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Adapter.cpp
@@ -56,6 +56,13 @@ namespace dawn_native {
return true;
}
+ WGPUDeviceProperties AdapterBase::GetAdapterProperties() const {
+ WGPUDeviceProperties adapterProperties = {};
+
+ mSupportedExtensions.InitializeDeviceProperties(&adapterProperties);
+ return adapterProperties;
+ }
+
DeviceBase* AdapterBase::CreateDevice(const DeviceDescriptor* descriptor) {
DeviceBase* result = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_native/Adapter.h b/chromium/third_party/dawn/src/dawn_native/Adapter.h
index f4cb28ae6ef..410a9a35a46 100644
--- a/chromium/third_party/dawn/src/dawn_native/Adapter.h
+++ b/chromium/third_party/dawn/src/dawn_native/Adapter.h
@@ -39,6 +39,7 @@ namespace dawn_native {
ExtensionsSet GetSupportedExtensions() const;
bool SupportsAllRequestedExtensions(
const std::vector<const char*>& requestedExtensions) const;
+ WGPUDeviceProperties GetAdapterProperties() const;
protected:
PCIInfo mPCIInfo = {};
diff --git a/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp b/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
index 3cd6621404d..5ff33b3309b 100644
--- a/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/AttachmentState.cpp
@@ -116,27 +116,27 @@ namespace dawn_native {
}
AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
- : AttachmentStateBlueprint(blueprint), RefCounted(), mDevice(device) {
+ : AttachmentStateBlueprint(blueprint), CachedObject(device) {
}
AttachmentState::~AttachmentState() {
- mDevice->UncacheAttachmentState(this);
+ GetDevice()->UncacheAttachmentState(this);
}
std::bitset<kMaxColorAttachments> AttachmentState::GetColorAttachmentsMask() const {
return mColorAttachmentsSet;
}
- dawn::TextureFormat AttachmentState::GetColorAttachmentFormat(uint32_t index) const {
+ wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(uint32_t index) const {
ASSERT(mColorAttachmentsSet[index]);
return mColorFormats[index];
}
bool AttachmentState::HasDepthStencilAttachment() const {
- return mDepthStencilFormat != dawn::TextureFormat::Undefined;
+ return mDepthStencilFormat != wgpu::TextureFormat::Undefined;
}
- dawn::TextureFormat AttachmentState::GetDepthStencilFormat() const {
+ wgpu::TextureFormat AttachmentState::GetDepthStencilFormat() const {
ASSERT(HasDepthStencilAttachment());
return mDepthStencilFormat;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/AttachmentState.h b/chromium/third_party/dawn/src/dawn_native/AttachmentState.h
index 7a2001a35e5..74c5432f238 100644
--- a/chromium/third_party/dawn/src/dawn_native/AttachmentState.h
+++ b/chromium/third_party/dawn/src/dawn_native/AttachmentState.h
@@ -16,7 +16,7 @@
#define DAWNNATIVE_ATTACHMENTSTATE_H_
#include "common/Constants.h"
-#include "dawn_native/RefCounted.h"
+#include "dawn_native/CachedObject.h"
#include "dawn_native/dawn_platform.h"
@@ -50,25 +50,22 @@ namespace dawn_native {
protected:
std::bitset<kMaxColorAttachments> mColorAttachmentsSet;
- std::array<dawn::TextureFormat, kMaxColorAttachments> mColorFormats;
+ std::array<wgpu::TextureFormat, kMaxColorAttachments> mColorFormats;
// Default (texture format Undefined) indicates there is no depth stencil attachment.
- dawn::TextureFormat mDepthStencilFormat = dawn::TextureFormat::Undefined;
+ wgpu::TextureFormat mDepthStencilFormat = wgpu::TextureFormat::Undefined;
uint32_t mSampleCount = 0;
};
- class AttachmentState : public AttachmentStateBlueprint, public RefCounted {
+ class AttachmentState : public AttachmentStateBlueprint, public CachedObject {
public:
AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint);
~AttachmentState() override;
std::bitset<kMaxColorAttachments> GetColorAttachmentsMask() const;
- dawn::TextureFormat GetColorAttachmentFormat(uint32_t index) const;
+ wgpu::TextureFormat GetColorAttachmentFormat(uint32_t index) const;
bool HasDepthStencilAttachment() const;
- dawn::TextureFormat GetDepthStencilFormat() const;
+ wgpu::TextureFormat GetDepthStencilFormat() const;
uint32_t GetSampleCount() const;
-
- private:
- DeviceBase* mDevice;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
index ceaf913b9d4..9e85be14dae 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
@@ -30,7 +30,7 @@ namespace dawn_native {
MaybeError ValidateBufferBinding(const DeviceBase* device,
const BindGroupBinding& binding,
- dawn::BufferUsage requiredUsage) {
+ wgpu::BufferUsage requiredUsage) {
if (binding.buffer == nullptr || binding.sampler != nullptr ||
binding.textureView != nullptr) {
return DAWN_VALIDATION_ERROR("expected buffer binding");
@@ -38,7 +38,7 @@ namespace dawn_native {
DAWN_TRY(device->ValidateObject(binding.buffer));
uint64_t bufferSize = binding.buffer->GetSize();
- uint64_t bindingSize = (binding.size == dawn::kWholeSize) ? bufferSize : binding.size;
+ uint64_t bindingSize = (binding.size == wgpu::kWholeSize) ? bufferSize : binding.size;
if (bindingSize > bufferSize) {
return DAWN_VALIDATION_ERROR("Buffer binding size larger than the buffer");
}
@@ -63,10 +63,10 @@ namespace dawn_native {
MaybeError ValidateTextureBinding(const DeviceBase* device,
const BindGroupBinding& binding,
- dawn::TextureUsage requiredUsage,
+ wgpu::TextureUsage requiredUsage,
bool multisampledBinding,
- dawn::TextureComponentType requiredComponentType,
- dawn::TextureViewDimension requiredDimension) {
+ wgpu::TextureComponentType requiredComponentType,
+ wgpu::TextureViewDimension requiredDimension) {
if (binding.textureView == nullptr || binding.sampler != nullptr ||
binding.buffer != nullptr) {
return DAWN_VALIDATION_ERROR("expected texture binding");
@@ -143,23 +143,23 @@ namespace dawn_native {
// Perform binding-type specific validation.
switch (layoutInfo.types[bindingIndex]) {
- case dawn::BindingType::UniformBuffer:
- DAWN_TRY(ValidateBufferBinding(device, binding, dawn::BufferUsage::Uniform));
+ case wgpu::BindingType::UniformBuffer:
+ DAWN_TRY(ValidateBufferBinding(device, binding, wgpu::BufferUsage::Uniform));
break;
- case dawn::BindingType::StorageBuffer:
- DAWN_TRY(ValidateBufferBinding(device, binding, dawn::BufferUsage::Storage));
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ DAWN_TRY(ValidateBufferBinding(device, binding, wgpu::BufferUsage::Storage));
break;
- case dawn::BindingType::SampledTexture:
- DAWN_TRY(ValidateTextureBinding(device, binding, dawn::TextureUsage::Sampled,
+ case wgpu::BindingType::SampledTexture:
+ DAWN_TRY(ValidateTextureBinding(device, binding, wgpu::TextureUsage::Sampled,
layoutInfo.multisampled[bindingIndex],
layoutInfo.textureComponentTypes[bindingIndex],
layoutInfo.textureDimensions[bindingIndex]));
break;
- case dawn::BindingType::Sampler:
+ case wgpu::BindingType::Sampler:
DAWN_TRY(ValidateSamplerBinding(device, binding));
break;
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::StorageTexture:
UNREACHABLE();
break;
}
@@ -193,7 +193,7 @@ namespace dawn_native {
mBindings[bindingIndex] = binding.buffer;
mOffsets[bindingIndex] = binding.offset;
uint64_t bufferSize =
- (binding.size == dawn::kWholeSize) ? binding.buffer->GetSize() : binding.size;
+ (binding.size == wgpu::kWholeSize) ? binding.buffer->GetSize() : binding.size;
mSizes[bindingIndex] = bufferSize;
continue;
}
@@ -221,7 +221,7 @@ namespace dawn_native {
return new BindGroupBase(device, ObjectBase::kError);
}
- const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
+ BindGroupLayoutBase* BindGroupBase::GetLayout() {
ASSERT(!IsError());
return mLayout.Get();
}
@@ -230,8 +230,10 @@ namespace dawn_native {
ASSERT(!IsError());
ASSERT(binding < kMaxBindingsPerGroup);
ASSERT(mLayout->GetBindingInfo().mask[binding]);
- ASSERT(mLayout->GetBindingInfo().types[binding] == dawn::BindingType::UniformBuffer ||
- mLayout->GetBindingInfo().types[binding] == dawn::BindingType::StorageBuffer);
+ ASSERT(mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::UniformBuffer ||
+ mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::StorageBuffer ||
+ mLayout->GetBindingInfo().types[binding] ==
+ wgpu::BindingType::ReadonlyStorageBuffer);
BufferBase* buffer = static_cast<BufferBase*>(mBindings[binding].Get());
return {buffer, mOffsets[binding], mSizes[binding]};
}
@@ -240,7 +242,7 @@ namespace dawn_native {
ASSERT(!IsError());
ASSERT(binding < kMaxBindingsPerGroup);
ASSERT(mLayout->GetBindingInfo().mask[binding]);
- ASSERT(mLayout->GetBindingInfo().types[binding] == dawn::BindingType::Sampler);
+ ASSERT(mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::Sampler);
return static_cast<SamplerBase*>(mBindings[binding].Get());
}
@@ -248,7 +250,7 @@ namespace dawn_native {
ASSERT(!IsError());
ASSERT(binding < kMaxBindingsPerGroup);
ASSERT(mLayout->GetBindingInfo().mask[binding]);
- ASSERT(mLayout->GetBindingInfo().types[binding] == dawn::BindingType::SampledTexture);
+ ASSERT(mLayout->GetBindingInfo().types[binding] == wgpu::BindingType::SampledTexture);
return static_cast<TextureViewBase*>(mBindings[binding].Get());
}
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroup.h b/chromium/third_party/dawn/src/dawn_native/BindGroup.h
index d31fff4f83f..fae804d1235 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroup.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroup.h
@@ -44,7 +44,7 @@ namespace dawn_native {
static BindGroupBase* MakeError(DeviceBase* device);
- const BindGroupLayoutBase* GetLayout() const;
+ BindGroupLayoutBase* GetLayout();
BufferBinding GetBindingAsBufferBinding(size_t binding);
SamplerBase* GetBindingAsSampler(size_t binding);
TextureViewBase* GetBindingAsTextureView(size_t binding);
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h b/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h
new file mode 100644
index 00000000000..0c016e1be7f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h
@@ -0,0 +1,86 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_BINDGROUPANDSTORAGEBARRIERTRACKER_H_
+#define DAWNNATIVE_BINDGROUPANDSTORAGEBARRIERTRACKER_H_
+
+#include "dawn_native/BindGroupTracker.h"
+
+#include "dawn_native/BindGroup.h"
+
+namespace dawn_native {
+
+ // Extends BindGroupTrackerBase to also keep track of resources that need a usage transition.
+ template <bool CanInheritBindGroups, typename DynamicOffset>
+ class BindGroupAndStorageBarrierTrackerBase
+ : public BindGroupTrackerBase<CanInheritBindGroups, DynamicOffset> {
+ using Base = BindGroupTrackerBase<CanInheritBindGroups, DynamicOffset>;
+
+ public:
+ BindGroupAndStorageBarrierTrackerBase() = default;
+
+ void OnSetBindGroup(uint32_t index,
+ BindGroupBase* bindGroup,
+ uint32_t dynamicOffsetCount,
+ uint32_t* dynamicOffsets) {
+ if (this->mBindGroups[index] != bindGroup) {
+ mBuffers[index] = {};
+ mBuffersNeedingBarrier[index] = {};
+
+ const BindGroupLayoutBase* layout = bindGroup->GetLayout();
+ const auto& info = layout->GetBindingInfo();
+
+ for (uint32_t binding : IterateBitSet(info.mask)) {
+ if ((info.visibilities[binding] & wgpu::ShaderStage::Compute) == 0) {
+ continue;
+ }
+
+ mBindingTypes[index][binding] = info.types[binding];
+ switch (info.types[binding]) {
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::SampledTexture:
+ // Don't require barriers.
+ break;
+
+ case wgpu::BindingType::StorageBuffer:
+ mBuffersNeedingBarrier[index].set(binding);
+ mBuffers[index][binding] =
+ bindGroup->GetBindingAsBufferBinding(binding).buffer;
+ break;
+
+ case wgpu::BindingType::StorageTexture:
+ // Not implemented.
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+
+ Base::OnSetBindGroup(index, bindGroup, dynamicOffsetCount, dynamicOffsets);
+ }
+
+ protected:
+ std::array<std::bitset<kMaxBindingsPerGroup>, kMaxBindGroups> mBuffersNeedingBarrier = {};
+ std::array<std::array<wgpu::BindingType, kMaxBindingsPerGroup>, kMaxBindGroups>
+ mBindingTypes = {};
+ std::array<std::array<BufferBase*, kMaxBindingsPerGroup>, kMaxBindGroups> mBuffers = {};
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_BINDGROUPANDSTORAGEBARRIERTRACKER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
index 2001b6be70c..3d0ecd2d36b 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
@@ -38,7 +38,7 @@ namespace dawn_native {
DAWN_TRY(ValidateBindingType(binding.type));
DAWN_TRY(ValidateTextureComponentType(binding.textureComponentType));
- if (binding.textureDimension != dawn::TextureViewDimension::Undefined) {
+ if (binding.textureDimension != wgpu::TextureViewDimension::Undefined) {
DAWN_TRY(ValidateTextureViewDimension(binding.textureDimension));
}
@@ -50,25 +50,24 @@ namespace dawn_native {
}
switch (binding.type) {
- case dawn::BindingType::UniformBuffer:
+ case wgpu::BindingType::UniformBuffer:
if (binding.hasDynamicOffset) {
++dynamicUniformBufferCount;
}
break;
- case dawn::BindingType::StorageBuffer:
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
if (binding.hasDynamicOffset) {
++dynamicStorageBufferCount;
}
break;
- case dawn::BindingType::SampledTexture:
- case dawn::BindingType::Sampler:
+ case wgpu::BindingType::SampledTexture:
+ case wgpu::BindingType::Sampler:
if (binding.hasDynamicOffset) {
return DAWN_VALIDATION_ERROR("Samplers and textures cannot be dynamic");
}
break;
- case dawn::BindingType::ReadonlyStorageBuffer:
- return DAWN_VALIDATION_ERROR("readonly storage buffers aren't supported (yet)");
- case dawn::BindingType::StorageTexture:
+ case wgpu::BindingType::StorageTexture:
return DAWN_VALIDATION_ERROR("storage textures aren't supported (yet)");
}
@@ -129,9 +128,8 @@ namespace dawn_native {
// BindGroupLayoutBase
BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- bool blueprint)
- : ObjectBase(device), mIsBlueprint(blueprint) {
+ const BindGroupLayoutDescriptor* descriptor)
+ : CachedObject(device) {
for (uint32_t i = 0; i < descriptor->bindingCount; ++i) {
auto& binding = descriptor->bindings[i];
@@ -140,24 +138,24 @@ namespace dawn_native {
mBindingInfo.types[index] = binding.type;
mBindingInfo.textureComponentTypes[index] = binding.textureComponentType;
- if (binding.textureDimension == dawn::TextureViewDimension::Undefined) {
- mBindingInfo.textureDimensions[index] = dawn::TextureViewDimension::e2D;
+ if (binding.textureDimension == wgpu::TextureViewDimension::Undefined) {
+ mBindingInfo.textureDimensions[index] = wgpu::TextureViewDimension::e2D;
} else {
mBindingInfo.textureDimensions[index] = binding.textureDimension;
}
if (binding.hasDynamicOffset) {
mBindingInfo.hasDynamicOffset.set(index);
switch (binding.type) {
- case dawn::BindingType::UniformBuffer:
+ case wgpu::BindingType::UniformBuffer:
++mDynamicUniformBufferCount;
break;
- case dawn::BindingType::StorageBuffer:
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
++mDynamicStorageBufferCount;
break;
- case dawn::BindingType::SampledTexture:
- case dawn::BindingType::Sampler:
- case dawn::BindingType::ReadonlyStorageBuffer:
- case dawn::BindingType::StorageTexture:
+ case wgpu::BindingType::SampledTexture:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::StorageTexture:
UNREACHABLE();
break;
}
@@ -171,12 +169,12 @@ namespace dawn_native {
}
BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag), mIsBlueprint(true) {
+ : CachedObject(device, tag) {
}
BindGroupLayoutBase::~BindGroupLayoutBase() {
// Do not uncache the actual cached object if we are a blueprint
- if (!mIsBlueprint && !IsError()) {
+ if (IsCachedReference()) {
GetDevice()->UncacheBindGroupLayout(this);
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
index c241cabb25d..4c0dd7aae8d 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
@@ -16,9 +16,9 @@
#define DAWNNATIVE_BINDGROUPLAYOUT_H_
#include "common/Constants.h"
+#include "dawn_native/CachedObject.h"
#include "dawn_native/Error.h"
#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectBase.h"
#include "dawn_native/dawn_platform.h"
@@ -30,20 +30,18 @@ namespace dawn_native {
MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase*,
const BindGroupLayoutDescriptor* descriptor);
- class BindGroupLayoutBase : public ObjectBase {
+ class BindGroupLayoutBase : public CachedObject {
public:
- BindGroupLayoutBase(DeviceBase* device,
- const BindGroupLayoutDescriptor* descriptor,
- bool blueprint = false);
+ BindGroupLayoutBase(DeviceBase* device, const BindGroupLayoutDescriptor* descriptor);
~BindGroupLayoutBase() override;
static BindGroupLayoutBase* MakeError(DeviceBase* device);
struct LayoutBindingInfo {
- std::array<dawn::ShaderStage, kMaxBindingsPerGroup> visibilities;
- std::array<dawn::BindingType, kMaxBindingsPerGroup> types;
- std::array<dawn::TextureComponentType, kMaxBindingsPerGroup> textureComponentTypes;
- std::array<dawn::TextureViewDimension, kMaxBindingsPerGroup> textureDimensions;
+ std::array<wgpu::ShaderStage, kMaxBindingsPerGroup> visibilities;
+ std::array<wgpu::BindingType, kMaxBindingsPerGroup> types;
+ std::array<wgpu::TextureComponentType, kMaxBindingsPerGroup> textureComponentTypes;
+ std::array<wgpu::TextureViewDimension, kMaxBindingsPerGroup> textureDimensions;
std::bitset<kMaxBindingsPerGroup> hasDynamicOffset;
std::bitset<kMaxBindingsPerGroup> multisampled;
std::bitset<kMaxBindingsPerGroup> mask;
@@ -66,7 +64,6 @@ namespace dawn_native {
BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
LayoutBindingInfo mBindingInfo;
- bool mIsBlueprint = false;
uint32_t mDynamicUniformBufferCount = 0;
uint32_t mDynamicStorageBufferCount = 0;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h b/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h
index da992a6d7f1..121dd0ffa8e 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h
@@ -27,17 +27,15 @@ namespace dawn_native {
// Keeps track of the dirty bind groups so they can be lazily applied when we know the
// pipeline state or it changes.
- // |BindGroup| is a template parameter so a backend may provide its backend-specific
- // type or native handle.
// |DynamicOffset| is a template parameter because offsets in Vulkan are uint32_t but uint64_t
// in other backends.
- template <typename BindGroup, bool CanInheritBindGroups, typename DynamicOffset = uint64_t>
+ template <bool CanInheritBindGroups, typename DynamicOffset>
class BindGroupTrackerBase {
public:
void OnSetBindGroup(uint32_t index,
- BindGroup bindGroup,
+ BindGroupBase* bindGroup,
uint32_t dynamicOffsetCount,
- uint64_t* dynamicOffsets) {
+ uint32_t* dynamicOffsets) {
ASSERT(index < kMaxBindGroups);
if (mBindGroupLayoutsMask[index]) {
@@ -103,7 +101,7 @@ namespace dawn_native {
std::bitset<kMaxBindGroups> mDirtyBindGroups = 0;
std::bitset<kMaxBindGroups> mDirtyBindGroupsObjectChangedOrIsDynamic = 0;
std::bitset<kMaxBindGroups> mBindGroupLayoutsMask = 0;
- std::array<BindGroup, kMaxBindGroups> mBindGroups = {};
+ std::array<BindGroupBase*, kMaxBindGroups> mBindGroups = {};
std::array<uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
std::array<std::array<DynamicOffset, kMaxBindingsPerGroup>, kMaxBindGroups>
mDynamicOffsets = {};
@@ -115,21 +113,20 @@ namespace dawn_native {
PipelineLayoutBase* mLastAppliedPipelineLayout = nullptr;
private:
- // Vulkan backend use uint32_t as dynamic offsets type, it is not correct.
- // Vulkan should use VkDeviceSize. Dawn vulkan backend has to handle this.
- static void SetDynamicOffsets(uint32_t* data,
+ // We have two overloads here because offsets in Vulkan are uint32_t but uint64_t
+ // in other backends.
+ static void SetDynamicOffsets(uint64_t* data,
uint32_t dynamicOffsetCount,
- uint64_t* dynamicOffsets) {
+ uint32_t* dynamicOffsets) {
for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
- ASSERT(dynamicOffsets[i] <= std::numeric_limits<uint32_t>::max());
- data[i] = static_cast<uint32_t>(dynamicOffsets[i]);
+ data[i] = static_cast<uint64_t>(dynamicOffsets[i]);
}
}
- static void SetDynamicOffsets(uint64_t* data,
+ static void SetDynamicOffsets(uint32_t* data,
uint32_t dynamicOffsetCount,
- uint64_t* dynamicOffsets) {
- memcpy(data, dynamicOffsets, sizeof(uint64_t) * dynamicOffsetCount);
+ uint32_t* dynamicOffsets) {
+ memcpy(data, dynamicOffsets, sizeof(uint32_t) * dynamicOffsetCount);
}
};
diff --git a/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp
index 87f4743e1ff..c3caa5be442 100644
--- a/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.cpp
@@ -11,35 +11,45 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+
#include "dawn_native/BuddyMemoryAllocator.h"
#include "common/Math.h"
+#include "dawn_native/ResourceHeapAllocator.h"
namespace dawn_native {
- BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t maxBlockSize,
- uint64_t memorySize,
- std::unique_ptr<MemoryAllocator> client)
- : mMemorySize(memorySize), mBuddyBlockAllocator(maxBlockSize), mClient(std::move(client)) {
- ASSERT(memorySize <= maxBlockSize);
- ASSERT(IsPowerOfTwo(mMemorySize));
- ASSERT(maxBlockSize % mMemorySize == 0);
-
- mTrackedSubAllocations.resize(maxBlockSize / mMemorySize);
+ BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t maxSystemSize,
+ uint64_t memoryBlockSize,
+ ResourceHeapAllocator* heapAllocator)
+ : mMemoryBlockSize(memoryBlockSize),
+ mBuddyBlockAllocator(maxSystemSize),
+ mHeapAllocator(heapAllocator) {
+ ASSERT(memoryBlockSize <= maxSystemSize);
+ ASSERT(IsPowerOfTwo(mMemoryBlockSize));
+ ASSERT(maxSystemSize % mMemoryBlockSize == 0);
+
+ mTrackedSubAllocations.resize(maxSystemSize / mMemoryBlockSize);
}
uint64_t BuddyMemoryAllocator::GetMemoryIndex(uint64_t offset) const {
ASSERT(offset != BuddyAllocator::kInvalidOffset);
- return offset / mMemorySize;
+ return offset / mMemoryBlockSize;
}
ResultOrError<ResourceMemoryAllocation> BuddyMemoryAllocator::Allocate(uint64_t allocationSize,
- uint64_t alignment,
- int memoryFlags) {
+ uint64_t alignment) {
ResourceMemoryAllocation invalidAllocation = ResourceMemoryAllocation{};
+ if (allocationSize == 0) {
+ return invalidAllocation;
+ }
+
+ // Round allocation size to nearest power-of-two.
+ allocationSize = NextPowerOfTwo(allocationSize);
+
// Allocation cannot exceed the memory size.
- if (allocationSize == 0 || allocationSize > mMemorySize) {
+ if (allocationSize > mMemoryBlockSize) {
return invalidAllocation;
}
@@ -53,7 +63,7 @@ namespace dawn_native {
if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
// Transfer ownership to this allocator
std::unique_ptr<ResourceHeapBase> memory;
- DAWN_TRY_ASSIGN(memory, mClient->Allocate(mMemorySize, memoryFlags));
+ DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(mMemoryBlockSize));
mTrackedSubAllocations[memoryIndex] = {/*refcount*/ 0, std::move(memory)};
}
@@ -64,11 +74,11 @@ namespace dawn_native {
info.mMethod = AllocationMethod::kSubAllocated;
// Allocation offset is always local to the memory.
- const uint64_t memoryOffset = blockOffset % mMemorySize;
+ const uint64_t memoryOffset = blockOffset % mMemoryBlockSize;
return ResourceMemoryAllocation{
info, memoryOffset, mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
- } // namespace dawn_native
+ }
void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
const AllocationInfo info = allocation.GetInfo();
@@ -78,18 +88,18 @@ namespace dawn_native {
const uint64_t memoryIndex = GetMemoryIndex(info.mBlockOffset);
ASSERT(mTrackedSubAllocations[memoryIndex].refcount > 0);
-
mTrackedSubAllocations[memoryIndex].refcount--;
if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
- mClient->Deallocate(std::move(mTrackedSubAllocations[memoryIndex].mMemoryAllocation));
+ mHeapAllocator->DeallocateResourceHeap(
+ std::move(mTrackedSubAllocations[memoryIndex].mMemoryAllocation));
}
mBuddyBlockAllocator.Deallocate(info.mBlockOffset);
}
- uint64_t BuddyMemoryAllocator::GetMemorySize() const {
- return mMemorySize;
+ uint64_t BuddyMemoryAllocator::GetMemoryBlockSize() const {
+ return mMemoryBlockSize;
}
uint64_t BuddyMemoryAllocator::ComputeTotalNumOfHeapsForTesting() const {
@@ -101,4 +111,5 @@ namespace dawn_native {
}
return count;
}
-} // namespace dawn_native \ No newline at end of file
+
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.h b/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.h
index b31b40074b4..c057f748223 100644
--- a/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.h
+++ b/chromium/third_party/dawn/src/dawn_native/BuddyMemoryAllocator.h
@@ -15,13 +15,17 @@
#ifndef DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
#define DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
-#include <vector>
-
#include "dawn_native/BuddyAllocator.h"
-#include "dawn_native/MemoryAllocator.h"
+#include "dawn_native/Error.h"
#include "dawn_native/ResourceMemoryAllocation.h"
+#include <memory>
+#include <vector>
+
namespace dawn_native {
+
+ class ResourceHeapAllocator;
+
// BuddyMemoryAllocator uses the buddy allocator to sub-allocate blocks of device
// memory created by MemoryAllocator clients. It creates a very large buddy system
// where backing device memory blocks equal a specified level in the system.
@@ -31,24 +35,20 @@ namespace dawn_native {
// same memory index, the memory refcount is incremented to ensure de-allocating one doesn't
// release the other prematurely.
//
- // The device will only create up to Log2(kMaxResourceSize) allocators and can prefer speed
- // over memory footprint by selecting an allocator with a higher memory threshold which results
- // in pre-allocating more memory.
- //
- // The resource allocation is guaranteed by the device to have compatible memory flags.
+ // The MemoryAllocator should return ResourceHeaps that are all compatible with each other.
+ // It should also outlive all the resources that are in the buddy allocator.
class BuddyMemoryAllocator {
public:
- BuddyMemoryAllocator(uint64_t maxBlockSize,
- uint64_t memorySize,
- std::unique_ptr<MemoryAllocator> client);
+ BuddyMemoryAllocator(uint64_t maxSystemSize,
+ uint64_t memoryBlockSize,
+ ResourceHeapAllocator* heapAllocator);
~BuddyMemoryAllocator() = default;
ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize,
- uint64_t alignment,
- int memoryFlags = 0);
+ uint64_t alignment);
void Deallocate(const ResourceMemoryAllocation& allocation);
- uint64_t GetMemorySize() const;
+ uint64_t GetMemoryBlockSize() const;
// For testing purposes.
uint64_t ComputeTotalNumOfHeapsForTesting() const;
@@ -56,10 +56,10 @@ namespace dawn_native {
private:
uint64_t GetMemoryIndex(uint64_t offset) const;
- uint64_t mMemorySize = 0;
+ uint64_t mMemoryBlockSize = 0;
BuddyAllocator mBuddyBlockAllocator;
- std::unique_ptr<MemoryAllocator> mClient;
+ ResourceHeapAllocator* mHeapAllocator;
struct TrackedSubAllocations {
size_t refcount = 0;
@@ -68,6 +68,7 @@ namespace dawn_native {
std::vector<TrackedSubAllocations> mTrackedSubAllocations;
};
+
} // namespace dawn_native
#endif // DAWNNATIVE_BUDDYMEMORYALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
index 21bc305fa1f..c57d23a129c 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
@@ -91,17 +91,17 @@ namespace dawn_native {
DAWN_TRY(ValidateBufferUsage(descriptor->usage));
- dawn::BufferUsage usage = descriptor->usage;
+ wgpu::BufferUsage usage = descriptor->usage;
- const dawn::BufferUsage kMapWriteAllowedUsages =
- dawn::BufferUsage::MapWrite | dawn::BufferUsage::CopySrc;
- if (usage & dawn::BufferUsage::MapWrite && (usage & kMapWriteAllowedUsages) != usage) {
+ const wgpu::BufferUsage kMapWriteAllowedUsages =
+ wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+ if (usage & wgpu::BufferUsage::MapWrite && (usage & kMapWriteAllowedUsages) != usage) {
return DAWN_VALIDATION_ERROR("Only CopySrc is allowed with MapWrite");
}
- const dawn::BufferUsage kMapReadAllowedUsages =
- dawn::BufferUsage::MapRead | dawn::BufferUsage::CopyDst;
- if (usage & dawn::BufferUsage::MapRead && (usage & kMapReadAllowedUsages) != usage) {
+ const wgpu::BufferUsage kMapReadAllowedUsages =
+ wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
+ if (usage & wgpu::BufferUsage::MapRead && (usage & kMapReadAllowedUsages) != usage) {
return DAWN_VALIDATION_ERROR("Only CopyDst is allowed with MapRead");
}
@@ -115,6 +115,12 @@ namespace dawn_native {
mSize(descriptor->size),
mUsage(descriptor->usage),
mState(BufferState::Unmapped) {
+ // Add readonly storage usage if the buffer has a storage usage. The validation rules in
+ // ValidatePassResourceUsage will make sure we don't use both at the same
+ // time.
+ if (mUsage & wgpu::BufferUsage::Storage) {
+ mUsage |= kReadOnlyStorage;
+ }
}
BufferBase::BufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
@@ -124,8 +130,8 @@ namespace dawn_native {
BufferBase::~BufferBase() {
if (mState == BufferState::Mapped) {
ASSERT(!IsError());
- CallMapReadCallback(mMapSerial, DAWN_BUFFER_MAP_ASYNC_STATUS_UNKNOWN, nullptr, 0u);
- CallMapWriteCallback(mMapSerial, DAWN_BUFFER_MAP_ASYNC_STATUS_UNKNOWN, nullptr, 0u);
+ CallMapReadCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
+ CallMapWriteCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
}
}
@@ -146,7 +152,7 @@ namespace dawn_native {
return mSize;
}
- dawn::BufferUsage BufferBase::GetUsage() const {
+ wgpu::BufferUsage BufferBase::GetUsage() const {
ASSERT(!IsError());
return mUsage;
}
@@ -189,7 +195,7 @@ namespace dawn_native {
}
void BufferBase::CallMapReadCallback(uint32_t serial,
- DawnBufferMapAsyncStatus status,
+ WGPUBufferMapAsyncStatus status,
const void* pointer,
uint32_t dataLength) {
ASSERT(!IsError());
@@ -197,14 +203,14 @@ namespace dawn_native {
ASSERT(mMapWriteCallback == nullptr);
// Tag the callback as fired before firing it, otherwise it could fire a second time if
// for example buffer.Unmap() is called inside the application-provided callback.
- DawnBufferMapReadCallback callback = mMapReadCallback;
+ WGPUBufferMapReadCallback callback = mMapReadCallback;
mMapReadCallback = nullptr;
callback(status, pointer, dataLength, mMapUserdata);
}
}
void BufferBase::CallMapWriteCallback(uint32_t serial,
- DawnBufferMapAsyncStatus status,
+ WGPUBufferMapAsyncStatus status,
void* pointer,
uint32_t dataLength) {
ASSERT(!IsError());
@@ -212,7 +218,7 @@ namespace dawn_native {
ASSERT(mMapReadCallback == nullptr);
// Tag the callback as fired before firing it, otherwise it could fire a second time if
// for example buffer.Unmap() is called inside the application-provided callback.
- DawnBufferMapWriteCallback callback = mMapWriteCallback;
+ WGPUBufferMapWriteCallback callback = mMapWriteCallback;
mMapWriteCallback = nullptr;
callback(status, pointer, dataLength, mMapUserdata);
}
@@ -229,9 +235,9 @@ namespace dawn_native {
}
}
- void BufferBase::MapReadAsync(DawnBufferMapReadCallback callback, void* userdata) {
- if (GetDevice()->ConsumedError(ValidateMap(dawn::BufferUsage::MapRead))) {
- callback(DAWN_BUFFER_MAP_ASYNC_STATUS_ERROR, nullptr, 0, userdata);
+ void BufferBase::MapReadAsync(WGPUBufferMapReadCallback callback, void* userdata) {
+ if (GetDevice()->ConsumedError(ValidateMap(wgpu::BufferUsage::MapRead))) {
+ callback(WGPUBufferMapAsyncStatus_Error, nullptr, 0, userdata);
return;
}
ASSERT(!IsError());
@@ -265,9 +271,9 @@ namespace dawn_native {
return {};
}
- void BufferBase::MapWriteAsync(DawnBufferMapWriteCallback callback, void* userdata) {
- if (GetDevice()->ConsumedError(ValidateMap(dawn::BufferUsage::MapWrite))) {
- callback(DAWN_BUFFER_MAP_ASYNC_STATUS_ERROR, nullptr, 0, userdata);
+ void BufferBase::MapWriteAsync(WGPUBufferMapWriteCallback callback, void* userdata) {
+ if (GetDevice()->ConsumedError(ValidateMap(wgpu::BufferUsage::MapWrite))) {
+ callback(WGPUBufferMapAsyncStatus_Error, nullptr, 0, userdata);
return;
}
ASSERT(!IsError());
@@ -333,8 +339,8 @@ namespace dawn_native {
// completed before the Unmap.
// Callbacks are not fired if there is no callback registered, so this is correct for
// CreateBufferMapped.
- CallMapReadCallback(mMapSerial, DAWN_BUFFER_MAP_ASYNC_STATUS_UNKNOWN, nullptr, 0u);
- CallMapWriteCallback(mMapSerial, DAWN_BUFFER_MAP_ASYNC_STATUS_UNKNOWN, nullptr, 0u);
+ CallMapReadCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
+ CallMapWriteCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
UnmapImpl();
}
mState = BufferState::Unmapped;
@@ -374,14 +380,14 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Buffer subdata out of range");
}
- if (!(mUsage & dawn::BufferUsage::CopyDst)) {
+ if (!(mUsage & wgpu::BufferUsage::CopyDst)) {
return DAWN_VALIDATION_ERROR("Buffer needs the CopyDst usage bit");
}
return {};
}
- MaybeError BufferBase::ValidateMap(dawn::BufferUsage requiredUsage) const {
+ MaybeError BufferBase::ValidateMap(wgpu::BufferUsage requiredUsage) const {
DAWN_TRY(GetDevice()->ValidateObject(this));
switch (mState) {
@@ -409,7 +415,7 @@ namespace dawn_native {
// even if it did not have a mappable usage.
return {};
case BufferState::Unmapped:
- if ((mUsage & (dawn::BufferUsage::MapRead | dawn::BufferUsage::MapWrite)) == 0) {
+ if ((mUsage & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) == 0) {
return DAWN_VALIDATION_ERROR("Buffer does not have map usage");
}
return {};
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.h b/chromium/third_party/dawn/src/dawn_native/Buffer.h
index 9549c50e825..054e5550456 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.h
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.h
@@ -27,12 +27,16 @@ namespace dawn_native {
MaybeError ValidateBufferDescriptor(DeviceBase* device, const BufferDescriptor* descriptor);
- static constexpr dawn::BufferUsage kReadOnlyBufferUsages =
- dawn::BufferUsage::MapRead | dawn::BufferUsage::CopySrc | dawn::BufferUsage::Index |
- dawn::BufferUsage::Vertex | dawn::BufferUsage::Uniform;
+ // Add an extra buffer usage (readonly storage buffer usage) for render pass resource tracking
+ static constexpr wgpu::BufferUsage kReadOnlyStorage =
+ static_cast<wgpu::BufferUsage>(0x80000000);
- static constexpr dawn::BufferUsage kWritableBufferUsages =
- dawn::BufferUsage::MapWrite | dawn::BufferUsage::CopyDst | dawn::BufferUsage::Storage;
+ static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
+ wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Index |
+ wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform | kReadOnlyStorage;
+
+ static constexpr wgpu::BufferUsage kWritableBufferUsages =
+ wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Storage;
class BufferBase : public ObjectBase {
enum class BufferState {
@@ -51,7 +55,7 @@ namespace dawn_native {
uint8_t** mappedPointer);
uint64_t GetSize() const;
- dawn::BufferUsage GetUsage() const;
+ wgpu::BufferUsage GetUsage() const;
MaybeError MapAtCreation(uint8_t** mappedPointer);
@@ -59,8 +63,8 @@ namespace dawn_native {
// Dawn API
void SetSubData(uint32_t start, uint32_t count, const void* data);
- void MapReadAsync(DawnBufferMapReadCallback callback, void* userdata);
- void MapWriteAsync(DawnBufferMapWriteCallback callback, void* userdata);
+ void MapReadAsync(WGPUBufferMapReadCallback callback, void* userdata);
+ void MapWriteAsync(WGPUBufferMapWriteCallback callback, void* userdata);
void Unmap();
void Destroy();
@@ -68,11 +72,11 @@ namespace dawn_native {
BufferBase(DeviceBase* device, ObjectBase::ErrorTag tag);
void CallMapReadCallback(uint32_t serial,
- DawnBufferMapAsyncStatus status,
+ WGPUBufferMapAsyncStatus status,
const void* pointer,
uint32_t dataLength);
void CallMapWriteCallback(uint32_t serial,
- DawnBufferMapAsyncStatus status,
+ WGPUBufferMapAsyncStatus status,
void* pointer,
uint32_t dataLength);
@@ -90,15 +94,15 @@ namespace dawn_native {
MaybeError CopyFromStagingBuffer();
MaybeError ValidateSetSubData(uint32_t start, uint32_t count) const;
- MaybeError ValidateMap(dawn::BufferUsage requiredUsage) const;
+ MaybeError ValidateMap(wgpu::BufferUsage requiredUsage) const;
MaybeError ValidateUnmap() const;
MaybeError ValidateDestroy() const;
uint64_t mSize = 0;
- dawn::BufferUsage mUsage = dawn::BufferUsage::None;
+ wgpu::BufferUsage mUsage = wgpu::BufferUsage::None;
- DawnBufferMapReadCallback mMapReadCallback = nullptr;
- DawnBufferMapWriteCallback mMapWriteCallback = nullptr;
+ WGPUBufferMapReadCallback mMapReadCallback = nullptr;
+ WGPUBufferMapWriteCallback mMapWriteCallback = nullptr;
void* mMapUserdata = 0;
uint32_t mMapSerial = 0;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryVk.cpp b/chromium/third_party/dawn/src/dawn_native/CachedObject.cpp
index 287323813bf..b91baed1675 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CachedObject.cpp
@@ -12,15 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "dawn_native/vulkan/ResourceMemoryVk.h"
+#include "dawn_native/CachedObject.h"
-namespace dawn_native { namespace vulkan {
+namespace dawn_native {
- ResourceMemory::ResourceMemory(VkDeviceMemory memory) : mMemory(memory) {
+ bool CachedObject::IsCachedReference() const {
+ return mIsCachedReference;
}
- VkDeviceMemory ResourceMemory::GetMemory() const {
- return mMemory;
+ void CachedObject::SetIsCachedReference() {
+ mIsCachedReference = true;
}
-}} // namespace dawn_native::vulkan \ No newline at end of file
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CachedObject.h b/chromium/third_party/dawn/src/dawn_native/CachedObject.h
new file mode 100644
index 00000000000..b498b917713
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/CachedObject.h
@@ -0,0 +1,41 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_CACHED_OBJECT_H_
+#define DAWNNATIVE_CACHED_OBJECT_H_
+
+#include "dawn_native/ObjectBase.h"
+
+namespace dawn_native {
+
+ // Some objects are cached so that instead of creating new duplicate objects,
+ // we increase the refcount of an existing object.
+ // When an object is successfully created, the device should call
+ // SetIsCachedReference() and insert the object into the cache.
+ class CachedObject : public ObjectBase {
+ public:
+ using ObjectBase::ObjectBase;
+
+ bool IsCachedReference() const;
+
+ private:
+ friend class DeviceBase;
+ void SetIsCachedReference();
+
+ bool mIsCachedReference = false;
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_CACHED_OBJECT_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
index 79e9479de18..c042d50db33 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
@@ -19,8 +19,7 @@
namespace dawn_native {
- CommandBufferBase::CommandBufferBase(CommandEncoderBase* encoder,
- const CommandBufferDescriptor*)
+ CommandBufferBase::CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor*)
: ObjectBase(encoder->GetDevice()), mResourceUsages(encoder->AcquireResourceUsages()) {
}
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h
index f1b5916c525..65650bd4979 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h
@@ -25,7 +25,7 @@ namespace dawn_native {
class CommandBufferBase : public ObjectBase {
public:
- CommandBufferBase(CommandEncoderBase* encoder, const CommandBufferDescriptor* descriptor);
+ CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
static CommandBufferBase* MakeError(DeviceBase* device);
const CommandBufferResourceUsage& GetResourceUsages() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
index 943b67be75e..d6ef68ba605 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.cpp
@@ -106,8 +106,9 @@ namespace dawn_native {
if (aspects[VALIDATION_ASPECT_VERTEX_BUFFERS]) {
ASSERT(mLastRenderPipeline != nullptr);
- auto requiredInputs = mLastRenderPipeline->GetInputsSetMask();
- if ((mInputsSet & requiredInputs) == requiredInputs) {
+ const std::bitset<kMaxVertexBuffers>& requiredVertexBuffers =
+ mLastRenderPipeline->GetVertexBufferSlotsUsed();
+ if ((mVertexBufferSlotsUsed & requiredVertexBuffers) == requiredVertexBuffers) {
mAspects.set(VALIDATION_ASPECT_VERTEX_BUFFERS);
}
}
@@ -153,7 +154,7 @@ namespace dawn_native {
}
void CommandBufferStateTracker::SetVertexBuffer(uint32_t slot) {
- mInputsSet.set(slot);
+ mVertexBufferSlotsUsed.set(slot);
}
void CommandBufferStateTracker::SetPipelineCommon(PipelineBase* pipeline) {
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
index 5be9dcae9ef..50e5107c971 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBufferStateTracker.h
@@ -53,7 +53,7 @@ namespace dawn_native {
ValidationAspects mAspects;
std::array<BindGroupBase*, kMaxBindGroups> mBindgroups = {};
- std::bitset<kMaxVertexBuffers> mInputsSet;
+ std::bitset<kMaxVertexBuffers> mVertexBufferSlotsUsed;
PipelineLayoutBase* mLastPipelineLayout = nullptr;
RenderPipelineBase* mLastRenderPipeline = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
index e0a9cd2e4ce..e0de3a215a7 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
@@ -24,9 +24,9 @@
#include "dawn_native/ComputePassEncoder.h"
#include "dawn_native/Device.h"
#include "dawn_native/ErrorData.h"
-#include "dawn_native/PassResourceUsageTracker.h"
#include "dawn_native/RenderPassEncoder.h"
#include "dawn_native/RenderPipeline.h"
+#include "dawn_platform/DawnPlatform.h"
#include "dawn_platform/tracing/TraceEvent.h"
#include <map>
@@ -242,8 +242,8 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateCanUseAs(BufferBase* buffer, dawn::BufferUsage usage) {
- ASSERT(HasZeroOrOneBits(usage));
+ MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
+ ASSERT(wgpu::HasZeroOrOneBits(usage));
if (!(buffer->GetUsage() & usage)) {
return DAWN_VALIDATION_ERROR("buffer doesn't have the required usage.");
}
@@ -251,8 +251,8 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateCanUseAs(TextureBase* texture, dawn::TextureUsage usage) {
- ASSERT(HasZeroOrOneBits(usage));
+ MaybeError ValidateCanUseAs(const TextureBase* texture, wgpu::TextureUsage usage) {
+ ASSERT(wgpu::HasZeroOrOneBits(usage));
if (!(texture->GetUsage() & usage)) {
return DAWN_VALIDATION_ERROR("texture doesn't have the required usage.");
}
@@ -352,7 +352,7 @@ namespace dawn_native {
"The size of the resolve target must be the same as the color attachment");
}
- dawn::TextureFormat resolveTargetFormat = resolveTarget->GetFormat().format;
+ wgpu::TextureFormat resolveTargetFormat = resolveTarget->GetFormat().format;
if (resolveTargetFormat != attachment->GetFormat().format) {
return DAWN_VALIDATION_ERROR(
"The format of the resolve target must be the same as the color attachment");
@@ -461,24 +461,23 @@ namespace dawn_native {
} // namespace
- CommandEncoderBase::CommandEncoderBase(DeviceBase* device, const CommandEncoderDescriptor*)
+ CommandEncoder::CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor*)
: ObjectBase(device), mEncodingContext(device, this) {
}
- CommandBufferResourceUsage CommandEncoderBase::AcquireResourceUsages() {
- ASSERT(!mWereResourceUsagesAcquired);
- mWereResourceUsagesAcquired = true;
- return std::move(mResourceUsages);
+ CommandBufferResourceUsage CommandEncoder::AcquireResourceUsages() {
+ return CommandBufferResourceUsage{mEncodingContext.AcquirePassUsages(),
+ std::move(mTopLevelBuffers),
+ std::move(mTopLevelTextures)};
}
- CommandIterator CommandEncoderBase::AcquireCommands() {
+ CommandIterator CommandEncoder::AcquireCommands() {
return mEncodingContext.AcquireCommands();
}
// Implementation of the API's command recording methods
- ComputePassEncoderBase* CommandEncoderBase::BeginComputePass(
- const ComputePassDescriptor* descriptor) {
+ ComputePassEncoder* CommandEncoder::BeginComputePass(const ComputePassDescriptor* descriptor) {
DeviceBase* device = GetDevice();
bool success =
@@ -491,19 +490,19 @@ namespace dawn_native {
});
if (success) {
- ComputePassEncoderBase* passEncoder =
- new ComputePassEncoderBase(device, this, &mEncodingContext);
+ ComputePassEncoder* passEncoder =
+ new ComputePassEncoder(device, this, &mEncodingContext);
mEncodingContext.EnterPass(passEncoder);
return passEncoder;
}
- return ComputePassEncoderBase::MakeError(device, this, &mEncodingContext);
+ return ComputePassEncoder::MakeError(device, this, &mEncodingContext);
}
- RenderPassEncoderBase* CommandEncoderBase::BeginRenderPass(
- const RenderPassDescriptor* descriptor) {
+ RenderPassEncoder* CommandEncoder::BeginRenderPass(const RenderPassDescriptor* descriptor) {
DeviceBase* device = GetDevice();
+ PassResourceUsageTracker usageTracker;
bool success =
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
uint32_t width = 0;
@@ -521,18 +520,29 @@ namespace dawn_native {
cmd->attachmentState = device->GetOrCreateAttachmentState(descriptor);
for (uint32_t i : IterateBitSet(cmd->attachmentState->GetColorAttachmentsMask())) {
- cmd->colorAttachments[i].view = descriptor->colorAttachments[i].attachment;
- cmd->colorAttachments[i].resolveTarget =
- descriptor->colorAttachments[i].resolveTarget;
+ TextureViewBase* view = descriptor->colorAttachments[i].attachment;
+ TextureViewBase* resolveTarget = descriptor->colorAttachments[i].resolveTarget;
+
+ cmd->colorAttachments[i].view = view;
+ cmd->colorAttachments[i].resolveTarget = resolveTarget;
cmd->colorAttachments[i].loadOp = descriptor->colorAttachments[i].loadOp;
cmd->colorAttachments[i].storeOp = descriptor->colorAttachments[i].storeOp;
cmd->colorAttachments[i].clearColor =
descriptor->colorAttachments[i].clearColor;
+
+ usageTracker.TextureUsedAs(view->GetTexture(),
+ wgpu::TextureUsage::OutputAttachment);
+
+ if (resolveTarget != nullptr) {
+ usageTracker.TextureUsedAs(resolveTarget->GetTexture(),
+ wgpu::TextureUsage::OutputAttachment);
+ }
}
if (cmd->attachmentState->HasDepthStencilAttachment()) {
- cmd->depthStencilAttachment.view =
- descriptor->depthStencilAttachment->attachment;
+ TextureViewBase* view = descriptor->depthStencilAttachment->attachment;
+
+ cmd->depthStencilAttachment.view = view;
cmd->depthStencilAttachment.clearDepth =
descriptor->depthStencilAttachment->clearDepth;
cmd->depthStencilAttachment.clearStencil =
@@ -545,6 +555,9 @@ namespace dawn_native {
descriptor->depthStencilAttachment->stencilLoadOp;
cmd->depthStencilAttachment.stencilStoreOp =
descriptor->depthStencilAttachment->stencilStoreOp;
+
+ usageTracker.TextureUsedAs(view->GetTexture(),
+ wgpu::TextureUsage::OutputAttachment);
}
cmd->width = width;
@@ -554,20 +567,20 @@ namespace dawn_native {
});
if (success) {
- RenderPassEncoderBase* passEncoder =
- new RenderPassEncoderBase(device, this, &mEncodingContext);
+ RenderPassEncoder* passEncoder =
+ new RenderPassEncoder(device, this, &mEncodingContext, std::move(usageTracker));
mEncodingContext.EnterPass(passEncoder);
return passEncoder;
}
- return RenderPassEncoderBase::MakeError(device, this, &mEncodingContext);
+ return RenderPassEncoder::MakeError(device, this, &mEncodingContext);
}
- void CommandEncoderBase::CopyBufferToBuffer(BufferBase* source,
- uint64_t sourceOffset,
- BufferBase* destination,
- uint64_t destinationOffset,
- uint64_t size) {
+ void CommandEncoder::CopyBufferToBuffer(BufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
DAWN_TRY(GetDevice()->ValidateObject(source));
DAWN_TRY(GetDevice()->ValidateObject(destination));
@@ -580,13 +593,17 @@ namespace dawn_native {
copy->destinationOffset = destinationOffset;
copy->size = size;
+ if (GetDevice()->IsValidationEnabled()) {
+ mTopLevelBuffers.insert(source);
+ mTopLevelBuffers.insert(destination);
+ }
return {};
});
}
- void CommandEncoderBase::CopyBufferToTexture(const BufferCopyView* source,
- const TextureCopyView* destination,
- const Extent3D* copySize) {
+ void CommandEncoder::CopyBufferToTexture(const BufferCopyView* source,
+ const TextureCopyView* destination,
+ const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
DAWN_TRY(GetDevice()->ValidateObject(source->buffer));
DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
@@ -612,13 +629,17 @@ namespace dawn_native {
copy->source.imageHeight = source->imageHeight;
}
+ if (GetDevice()->IsValidationEnabled()) {
+ mTopLevelBuffers.insert(source->buffer);
+ mTopLevelTextures.insert(destination->texture);
+ }
return {};
});
}
- void CommandEncoderBase::CopyTextureToBuffer(const TextureCopyView* source,
- const BufferCopyView* destination,
- const Extent3D* copySize) {
+ void CommandEncoder::CopyTextureToBuffer(const TextureCopyView* source,
+ const BufferCopyView* destination,
+ const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
DAWN_TRY(GetDevice()->ValidateObject(source->texture));
DAWN_TRY(GetDevice()->ValidateObject(destination->buffer));
@@ -644,13 +665,17 @@ namespace dawn_native {
copy->destination.imageHeight = destination->imageHeight;
}
+ if (GetDevice()->IsValidationEnabled()) {
+ mTopLevelTextures.insert(source->texture);
+ mTopLevelBuffers.insert(destination->buffer);
+ }
return {};
});
}
- void CommandEncoderBase::CopyTextureToTexture(const TextureCopyView* source,
- const TextureCopyView* destination,
- const Extent3D* copySize) {
+ void CommandEncoder::CopyTextureToTexture(const TextureCopyView* source,
+ const TextureCopyView* destination,
+ const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
DAWN_TRY(GetDevice()->ValidateObject(source->texture));
DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
@@ -667,11 +692,15 @@ namespace dawn_native {
copy->destination.arrayLayer = destination->arrayLayer;
copy->copySize = *copySize;
+ if (GetDevice()->IsValidationEnabled()) {
+ mTopLevelTextures.insert(source->texture);
+ mTopLevelTextures.insert(destination->texture);
+ }
return {};
});
}
- void CommandEncoderBase::InsertDebugMarker(const char* groupLabel) {
+ void CommandEncoder::InsertDebugMarker(const char* groupLabel) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
InsertDebugMarkerCmd* cmd =
allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
@@ -684,7 +713,7 @@ namespace dawn_native {
});
}
- void CommandEncoderBase::PopDebugGroup() {
+ void CommandEncoder::PopDebugGroup() {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
@@ -692,7 +721,7 @@ namespace dawn_native {
});
}
- void CommandEncoderBase::PushDebugGroup(const char* groupLabel) {
+ void CommandEncoder::PushDebugGroup(const char* groupLabel) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
PushDebugGroupCmd* cmd =
allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
@@ -705,48 +734,50 @@ namespace dawn_native {
});
}
- CommandBufferBase* CommandEncoderBase::Finish(const CommandBufferDescriptor* descriptor) {
- TRACE_EVENT0(GetDevice()->GetPlatform(), TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
- "CommandEncoderBase::Finish");
- if (GetDevice()->ConsumedError(ValidateFinish(descriptor))) {
- // Even if finish validation fails, it is now invalid to call any encoding commands on
- // this object, so we set its state to finished.
- return CommandBufferBase::MakeError(GetDevice());
+ CommandBufferBase* CommandEncoder::Finish(const CommandBufferDescriptor* descriptor) {
+ DeviceBase* device = GetDevice();
+ // Even if mEncodingContext.Finish() validation fails, calling it will mutate the internal
+ // state of the encoding context. The internal state is set to finished, and subsequent
+ // calls to encode commands will generate errors.
+ if (device->ConsumedError(mEncodingContext.Finish()) ||
+ (device->IsValidationEnabled() &&
+ device->ConsumedError(ValidateFinish(mEncodingContext.GetIterator(),
+ mEncodingContext.GetPassUsages())))) {
+ return CommandBufferBase::MakeError(device);
}
ASSERT(!IsError());
-
- return GetDevice()->CreateCommandBuffer(this, descriptor);
+ return device->CreateCommandBuffer(this, descriptor);
}
// Implementation of the command buffer validation that can be precomputed before submit
-
- MaybeError CommandEncoderBase::ValidateFinish(const CommandBufferDescriptor*) {
+ MaybeError CommandEncoder::ValidateFinish(CommandIterator* commands,
+ const PerPassUsages& perPassUsages) const {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "CommandEncoder::ValidateFinish");
DAWN_TRY(GetDevice()->ValidateObject(this));
- // Even if Finish() validation fails, calling it will mutate the internal state of the
- // encoding context. Subsequent calls to encode commands will generate errors.
- DAWN_TRY(mEncodingContext.Finish());
+ for (const PassResourceUsage& passUsage : perPassUsages) {
+ DAWN_TRY(ValidatePassResourceUsage(passUsage));
+ }
uint64_t debugGroupStackSize = 0;
- CommandIterator* commands = mEncodingContext.GetIterator();
commands->Reset();
-
Command type;
while (commands->NextCommandId(&type)) {
switch (type) {
case Command::BeginComputePass: {
commands->NextCommand<BeginComputePassCmd>();
- DAWN_TRY(ValidateComputePass(commands, &mResourceUsages.perPass));
+ DAWN_TRY(ValidateComputePass(commands));
} break;
case Command::BeginRenderPass: {
- BeginRenderPassCmd* cmd = commands->NextCommand<BeginRenderPassCmd>();
- DAWN_TRY(ValidateRenderPass(commands, cmd, &mResourceUsages.perPass));
+ const BeginRenderPassCmd* cmd = commands->NextCommand<BeginRenderPassCmd>();
+ DAWN_TRY(ValidateRenderPass(commands, cmd));
} break;
case Command::CopyBufferToBuffer: {
- CopyBufferToBufferCmd* copy = commands->NextCommand<CopyBufferToBufferCmd>();
+ const CopyBufferToBufferCmd* copy =
+ commands->NextCommand<CopyBufferToBufferCmd>();
DAWN_TRY(
ValidateCopySizeFitsInBuffer(copy->source, copy->sourceOffset, copy->size));
@@ -755,15 +786,13 @@ namespace dawn_native {
DAWN_TRY(ValidateB2BCopySizeAlignment(copy->size, copy->sourceOffset,
copy->destinationOffset));
- DAWN_TRY(ValidateCanUseAs(copy->source.Get(), dawn::BufferUsage::CopySrc));
- DAWN_TRY(ValidateCanUseAs(copy->destination.Get(), dawn::BufferUsage::CopyDst));
-
- mResourceUsages.topLevelBuffers.insert(copy->source.Get());
- mResourceUsages.topLevelBuffers.insert(copy->destination.Get());
+ DAWN_TRY(ValidateCanUseAs(copy->source.Get(), wgpu::BufferUsage::CopySrc));
+ DAWN_TRY(ValidateCanUseAs(copy->destination.Get(), wgpu::BufferUsage::CopyDst));
} break;
case Command::CopyBufferToTexture: {
- CopyBufferToTextureCmd* copy = commands->NextCommand<CopyBufferToTextureCmd>();
+ const CopyBufferToTextureCmd* copy =
+ commands->NextCommand<CopyBufferToTextureCmd>();
DAWN_TRY(
ValidateTextureSampleCountInCopyCommands(copy->destination.texture.Get()));
@@ -789,16 +818,14 @@ namespace dawn_native {
copy->destination.texture->GetFormat()));
DAWN_TRY(
- ValidateCanUseAs(copy->source.buffer.Get(), dawn::BufferUsage::CopySrc));
+ ValidateCanUseAs(copy->source.buffer.Get(), wgpu::BufferUsage::CopySrc));
DAWN_TRY(ValidateCanUseAs(copy->destination.texture.Get(),
- dawn::TextureUsage::CopyDst));
-
- mResourceUsages.topLevelBuffers.insert(copy->source.buffer.Get());
- mResourceUsages.topLevelTextures.insert(copy->destination.texture.Get());
+ wgpu::TextureUsage::CopyDst));
} break;
case Command::CopyTextureToBuffer: {
- CopyTextureToBufferCmd* copy = commands->NextCommand<CopyTextureToBufferCmd>();
+ const CopyTextureToBufferCmd* copy =
+ commands->NextCommand<CopyTextureToBufferCmd>();
DAWN_TRY(ValidateTextureSampleCountInCopyCommands(copy->source.texture.Get()));
@@ -824,16 +851,13 @@ namespace dawn_native {
copy->source.texture->GetFormat()));
DAWN_TRY(
- ValidateCanUseAs(copy->source.texture.Get(), dawn::TextureUsage::CopySrc));
+ ValidateCanUseAs(copy->source.texture.Get(), wgpu::TextureUsage::CopySrc));
DAWN_TRY(ValidateCanUseAs(copy->destination.buffer.Get(),
- dawn::BufferUsage::CopyDst));
-
- mResourceUsages.topLevelTextures.insert(copy->source.texture.Get());
- mResourceUsages.topLevelBuffers.insert(copy->destination.buffer.Get());
+ wgpu::BufferUsage::CopyDst));
} break;
case Command::CopyTextureToTexture: {
- CopyTextureToTextureCmd* copy =
+ const CopyTextureToTextureCmd* copy =
commands->NextCommand<CopyTextureToTextureCmd>();
DAWN_TRY(ValidateTextureToTextureCopyRestrictions(
@@ -852,16 +876,13 @@ namespace dawn_native {
DAWN_TRY(ValidateCopySizeFitsInTexture(copy->destination, copy->copySize));
DAWN_TRY(
- ValidateCanUseAs(copy->source.texture.Get(), dawn::TextureUsage::CopySrc));
+ ValidateCanUseAs(copy->source.texture.Get(), wgpu::TextureUsage::CopySrc));
DAWN_TRY(ValidateCanUseAs(copy->destination.texture.Get(),
- dawn::TextureUsage::CopyDst));
-
- mResourceUsages.topLevelTextures.insert(copy->source.texture.Get());
- mResourceUsages.topLevelTextures.insert(copy->destination.texture.Get());
+ wgpu::TextureUsage::CopyDst));
} break;
case Command::InsertDebugMarker: {
- InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
+ const InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
commands->NextData<char>(cmd->length + 1);
} break;
@@ -872,7 +893,7 @@ namespace dawn_native {
} break;
case Command::PushDebugGroup: {
- PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
+ const PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
commands->NextData<char>(cmd->length + 1);
debugGroupStackSize++;
} break;
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
index 6d39ed3ddc0..2c89c4bd777 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
@@ -28,16 +28,16 @@ namespace dawn_native {
struct BeginRenderPassCmd;
- class CommandEncoderBase : public ObjectBase {
+ class CommandEncoder final : public ObjectBase {
public:
- CommandEncoderBase(DeviceBase* device, const CommandEncoderDescriptor* descriptor);
+ CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor);
CommandIterator AcquireCommands();
CommandBufferResourceUsage AcquireResourceUsages();
// Dawn API
- ComputePassEncoderBase* BeginComputePass(const ComputePassDescriptor* descriptor);
- RenderPassEncoderBase* BeginRenderPass(const RenderPassDescriptor* descriptor);
+ ComputePassEncoder* BeginComputePass(const ComputePassDescriptor* descriptor);
+ RenderPassEncoder* BeginRenderPass(const RenderPassDescriptor* descriptor);
void CopyBufferToBuffer(BufferBase* source,
uint64_t sourceOffset,
@@ -61,12 +61,12 @@ namespace dawn_native {
CommandBufferBase* Finish(const CommandBufferDescriptor* descriptor);
private:
- MaybeError ValidateFinish(const CommandBufferDescriptor* descriptor);
+ MaybeError ValidateFinish(CommandIterator* commands,
+ const PerPassUsages& perPassUsages) const;
EncodingContext mEncodingContext;
-
- bool mWereResourceUsagesAcquired = false;
- CommandBufferResourceUsage mResourceUsages;
+ std::set<BufferBase*> mTopLevelBuffers;
+ std::set<TextureBase*> mTopLevelTextures;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
index c249a337899..dc7a3ccec54 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
@@ -16,9 +16,10 @@
#include "common/BitSetIterator.h"
#include "dawn_native/BindGroup.h"
+#include "dawn_native/Buffer.h"
#include "dawn_native/CommandBufferStateTracker.h"
#include "dawn_native/Commands.h"
-#include "dawn_native/PassResourceUsageTracker.h"
+#include "dawn_native/PassResourceUsage.h"
#include "dawn_native/RenderBundle.h"
#include "dawn_native/RenderPipeline.h"
@@ -26,43 +27,8 @@ namespace dawn_native {
namespace {
- void TrackBindGroupResourceUsage(BindGroupBase* group,
- PassResourceUsageTracker* usageTracker) {
- const auto& layoutInfo = group->GetLayout()->GetBindingInfo();
-
- for (uint32_t i : IterateBitSet(layoutInfo.mask)) {
- dawn::BindingType type = layoutInfo.types[i];
-
- switch (type) {
- case dawn::BindingType::UniformBuffer: {
- BufferBase* buffer = group->GetBindingAsBufferBinding(i).buffer;
- usageTracker->BufferUsedAs(buffer, dawn::BufferUsage::Uniform);
- } break;
-
- case dawn::BindingType::StorageBuffer: {
- BufferBase* buffer = group->GetBindingAsBufferBinding(i).buffer;
- usageTracker->BufferUsedAs(buffer, dawn::BufferUsage::Storage);
- } break;
-
- case dawn::BindingType::SampledTexture: {
- TextureBase* texture = group->GetBindingAsTextureView(i)->GetTexture();
- usageTracker->TextureUsedAs(texture, dawn::TextureUsage::Sampled);
- } break;
-
- case dawn::BindingType::Sampler:
- break;
-
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
- UNREACHABLE();
- break;
- }
- }
- }
-
inline MaybeError ValidateRenderBundleCommand(CommandIterator* commands,
Command type,
- PassResourceUsageTracker* usageTracker,
CommandBufferStateTracker* commandBufferState,
const AttachmentState* attachmentState,
uint64_t* debugGroupStackSize,
@@ -79,17 +45,13 @@ namespace dawn_native {
} break;
case Command::DrawIndirect: {
- DrawIndirectCmd* cmd = commands->NextCommand<DrawIndirectCmd>();
+ commands->NextCommand<DrawIndirectCmd>();
DAWN_TRY(commandBufferState->ValidateCanDraw());
- usageTracker->BufferUsedAs(cmd->indirectBuffer.Get(),
- dawn::BufferUsage::Indirect);
} break;
case Command::DrawIndexedIndirect: {
- DrawIndexedIndirectCmd* cmd = commands->NextCommand<DrawIndexedIndirectCmd>();
+ commands->NextCommand<DrawIndexedIndirectCmd>();
DAWN_TRY(commandBufferState->ValidateCanDrawIndexed());
- usageTracker->BufferUsedAs(cmd->indirectBuffer.Get(),
- dawn::BufferUsage::Indirect);
} break;
case Command::InsertDebugMarker: {
@@ -122,24 +84,19 @@ namespace dawn_native {
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
if (cmd->dynamicOffsetCount > 0) {
- commands->NextData<uint64_t>(cmd->dynamicOffsetCount);
+ commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
- TrackBindGroupResourceUsage(cmd->group.Get(), usageTracker);
commandBufferState->SetBindGroup(cmd->index, cmd->group.Get());
} break;
case Command::SetIndexBuffer: {
- SetIndexBufferCmd* cmd = commands->NextCommand<SetIndexBufferCmd>();
-
- usageTracker->BufferUsedAs(cmd->buffer.Get(), dawn::BufferUsage::Index);
+ commands->NextCommand<SetIndexBufferCmd>();
commandBufferState->SetIndexBuffer();
} break;
case Command::SetVertexBuffer: {
SetVertexBufferCmd* cmd = commands->NextCommand<SetVertexBufferCmd>();
-
- usageTracker->BufferUsedAs(cmd->buffer.Get(), dawn::BufferUsage::Vertex);
commandBufferState->SetVertexBuffer(cmd->slot);
} break;
@@ -167,63 +124,31 @@ namespace dawn_native {
}
MaybeError ValidateRenderBundle(CommandIterator* commands,
- const AttachmentState* attachmentState,
- PassResourceUsage* resourceUsage) {
- PassResourceUsageTracker usageTracker;
+ const AttachmentState* attachmentState) {
CommandBufferStateTracker commandBufferState;
uint64_t debugGroupStackSize = 0;
Command type;
while (commands->NextCommandId(&type)) {
- DAWN_TRY(ValidateRenderBundleCommand(commands, type, &usageTracker, &commandBufferState,
+ DAWN_TRY(ValidateRenderBundleCommand(commands, type, &commandBufferState,
attachmentState, &debugGroupStackSize,
"Command disallowed inside a render bundle"));
}
DAWN_TRY(ValidateFinalDebugGroupStackSize(debugGroupStackSize));
- DAWN_TRY(usageTracker.ValidateRenderPassUsages());
- ASSERT(resourceUsage != nullptr);
- *resourceUsage = usageTracker.AcquireResourceUsage();
-
return {};
}
- MaybeError ValidateRenderPass(CommandIterator* commands,
- BeginRenderPassCmd* renderPass,
- std::vector<PassResourceUsage>* perPassResourceUsages) {
- PassResourceUsageTracker usageTracker;
+ MaybeError ValidateRenderPass(CommandIterator* commands, const BeginRenderPassCmd* renderPass) {
CommandBufferStateTracker commandBufferState;
uint64_t debugGroupStackSize = 0;
- // Track usage of the render pass attachments
- for (uint32_t i : IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- RenderPassColorAttachmentInfo* colorAttachment = &renderPass->colorAttachments[i];
- TextureBase* texture = colorAttachment->view->GetTexture();
- usageTracker.TextureUsedAs(texture, dawn::TextureUsage::OutputAttachment);
-
- TextureViewBase* resolveTarget = colorAttachment->resolveTarget.Get();
- if (resolveTarget != nullptr) {
- usageTracker.TextureUsedAs(resolveTarget->GetTexture(),
- dawn::TextureUsage::OutputAttachment);
- }
- }
-
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- TextureBase* texture = renderPass->depthStencilAttachment.view->GetTexture();
- usageTracker.TextureUsedAs(texture, dawn::TextureUsage::OutputAttachment);
- }
-
Command type;
while (commands->NextCommandId(&type)) {
switch (type) {
case Command::EndRenderPass: {
commands->NextCommand<EndRenderPassCmd>();
-
DAWN_TRY(ValidateFinalDebugGroupStackSize(debugGroupStackSize));
- DAWN_TRY(usageTracker.ValidateRenderPassUsages());
- ASSERT(perPassResourceUsages != nullptr);
- perPassResourceUsages->push_back(usageTracker.AcquireResourceUsage());
-
return {};
} break;
@@ -236,15 +161,6 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR(
"Render bundle is not compatible with render pass");
}
-
- const PassResourceUsage& usages = bundles[i]->GetResourceUsage();
- for (uint32_t i = 0; i < usages.buffers.size(); ++i) {
- usageTracker.BufferUsedAs(usages.buffers[i], usages.bufferUsages[i]);
- }
-
- for (uint32_t i = 0; i < usages.textures.size(); ++i) {
- usageTracker.TextureUsedAs(usages.textures[i], usages.textureUsages[i]);
- }
}
if (cmd->count > 0) {
@@ -272,9 +188,8 @@ namespace dawn_native {
default:
DAWN_TRY(ValidateRenderBundleCommand(
- commands, type, &usageTracker, &commandBufferState,
- renderPass->attachmentState.Get(), &debugGroupStackSize,
- "Command disallowed inside a render pass"));
+ commands, type, &commandBufferState, renderPass->attachmentState.Get(),
+ &debugGroupStackSize, "Command disallowed inside a render pass"));
}
}
@@ -282,9 +197,7 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Unfinished render pass");
}
- MaybeError ValidateComputePass(CommandIterator* commands,
- std::vector<PassResourceUsage>* perPassResourceUsages) {
- PassResourceUsageTracker usageTracker;
+ MaybeError ValidateComputePass(CommandIterator* commands) {
CommandBufferStateTracker commandBufferState;
uint64_t debugGroupStackSize = 0;
@@ -293,11 +206,7 @@ namespace dawn_native {
switch (type) {
case Command::EndComputePass: {
commands->NextCommand<EndComputePassCmd>();
-
DAWN_TRY(ValidateFinalDebugGroupStackSize(debugGroupStackSize));
- DAWN_TRY(usageTracker.ValidateComputePassUsages());
- ASSERT(perPassResourceUsages != nullptr);
- perPassResourceUsages->push_back(usageTracker.AcquireResourceUsage());
return {};
} break;
@@ -307,10 +216,8 @@ namespace dawn_native {
} break;
case Command::DispatchIndirect: {
- DispatchIndirectCmd* cmd = commands->NextCommand<DispatchIndirectCmd>();
+ commands->NextCommand<DispatchIndirectCmd>();
DAWN_TRY(commandBufferState.ValidateCanDispatch());
- usageTracker.BufferUsedAs(cmd->indirectBuffer.Get(),
- dawn::BufferUsage::Indirect);
} break;
case Command::InsertDebugMarker: {
@@ -339,10 +246,8 @@ namespace dawn_native {
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
if (cmd->dynamicOffsetCount > 0) {
- commands->NextData<uint64_t>(cmd->dynamicOffsetCount);
+ commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
-
- TrackBindGroupResourceUsage(cmd->group.Get(), &usageTracker);
commandBufferState.SetBindGroup(cmd->index, cmd->group.Get());
} break;
@@ -355,4 +260,46 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Unfinished compute pass");
}
+ // Performs the per-pass usage validation checks
+ // This will eventually need to differentiate between render and compute passes.
+ // It will be valid to use a buffer both as uniform and storage in the same compute pass.
+ MaybeError ValidatePassResourceUsage(const PassResourceUsage& pass) {
+ // Buffers can only be used as single-write or multiple read.
+ for (size_t i = 0; i < pass.buffers.size(); ++i) {
+ const BufferBase* buffer = pass.buffers[i];
+ wgpu::BufferUsage usage = pass.bufferUsages[i];
+
+ if (usage & ~buffer->GetUsage()) {
+ return DAWN_VALIDATION_ERROR("Buffer missing usage for the pass");
+ }
+
+ bool readOnly = (usage & kReadOnlyBufferUsages) == usage;
+ bool singleUse = wgpu::HasZeroOrOneBits(usage);
+
+ if (!readOnly && !singleUse) {
+ return DAWN_VALIDATION_ERROR(
+ "Buffer used as writable usage and another usage in pass");
+ }
+ }
+
+ // Textures can only be used as single-write or multiple read.
+ // TODO(cwallez@chromium.org): implement per-subresource tracking
+ for (size_t i = 0; i < pass.textures.size(); ++i) {
+ const TextureBase* texture = pass.textures[i];
+ wgpu::TextureUsage usage = pass.textureUsages[i];
+
+ if (usage & ~texture->GetUsage()) {
+ return DAWN_VALIDATION_ERROR("Texture missing usage for the pass");
+ }
+
+ // For textures the only read-only usage in a pass is Sampled, so checking the
+ // usage constraint simplifies to checking a single usage bit is set.
+ if (!wgpu::HasZeroOrOneBits(usage)) {
+ return DAWN_VALIDATION_ERROR("Texture used with more than one usage in pass");
+ }
+ }
+
+ return {};
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
index b5a14934abd..d649ce32eeb 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
@@ -30,13 +30,11 @@ namespace dawn_native {
MaybeError ValidateFinalDebugGroupStackSize(uint64_t debugGroupStackSize);
MaybeError ValidateRenderBundle(CommandIterator* commands,
- const AttachmentState* attachmentState,
- PassResourceUsage* resourceUsage);
- MaybeError ValidateRenderPass(CommandIterator* commands,
- BeginRenderPassCmd* renderPass,
- std::vector<PassResourceUsage>* perPassResourceUsages);
- MaybeError ValidateComputePass(CommandIterator* commands,
- std::vector<PassResourceUsage>* perPassResourceUsages);
+ const AttachmentState* attachmentState);
+ MaybeError ValidateRenderPass(CommandIterator* commands, const BeginRenderPassCmd* renderPass);
+ MaybeError ValidateComputePass(CommandIterator* commands);
+
+ MaybeError ValidatePassResourceUsage(const PassResourceUsage& usage);
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Commands.cpp b/chromium/third_party/dawn/src/dawn_native/Commands.cpp
index 0ac1f033b6b..3810089066e 100644
--- a/chromium/third_party/dawn/src/dawn_native/Commands.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Commands.cpp
@@ -136,7 +136,7 @@ namespace dawn_native {
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
if (cmd->dynamicOffsetCount > 0) {
- commands->NextData<uint64_t>(cmd->dynamicOffsetCount);
+ commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
cmd->~SetBindGroupCmd();
} break;
@@ -254,9 +254,12 @@ namespace dawn_native {
commands->NextCommand<SetBlendColorCmd>();
break;
- case Command::SetBindGroup:
- commands->NextCommand<SetBindGroupCmd>();
- break;
+ case Command::SetBindGroup: {
+ SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
+ if (cmd->dynamicOffsetCount > 0) {
+ commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
+ }
+ } break;
case Command::SetIndexBuffer:
commands->NextCommand<SetIndexBufferCmd>();
diff --git a/chromium/third_party/dawn/src/dawn_native/Commands.h b/chromium/third_party/dawn/src/dawn_native/Commands.h
index eeaf9dc2df5..05632d79520 100644
--- a/chromium/third_party/dawn/src/dawn_native/Commands.h
+++ b/chromium/third_party/dawn/src/dawn_native/Commands.h
@@ -66,17 +66,17 @@ namespace dawn_native {
struct RenderPassColorAttachmentInfo {
Ref<TextureViewBase> view;
Ref<TextureViewBase> resolveTarget;
- dawn::LoadOp loadOp;
- dawn::StoreOp storeOp;
+ wgpu::LoadOp loadOp;
+ wgpu::StoreOp storeOp;
dawn_native::Color clearColor;
};
struct RenderPassDepthStencilAttachmentInfo {
Ref<TextureViewBase> view;
- dawn::LoadOp depthLoadOp;
- dawn::StoreOp depthStoreOp;
- dawn::LoadOp stencilLoadOp;
- dawn::StoreOp stencilStoreOp;
+ wgpu::LoadOp depthLoadOp;
+ wgpu::StoreOp depthStoreOp;
+ wgpu::LoadOp stencilLoadOp;
+ wgpu::StoreOp stencilStoreOp;
float clearDepth;
uint32_t clearStencil;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
index 2a6eb42d354..cd88c875a11 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
@@ -22,38 +22,37 @@
namespace dawn_native {
- ComputePassEncoderBase::ComputePassEncoderBase(DeviceBase* device,
- CommandEncoderBase* commandEncoder,
- EncodingContext* encodingContext)
+ ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext)
: ProgrammablePassEncoder(device, encodingContext), mCommandEncoder(commandEncoder) {
}
- ComputePassEncoderBase::ComputePassEncoderBase(DeviceBase* device,
- CommandEncoderBase* commandEncoder,
- EncodingContext* encodingContext,
- ErrorTag errorTag)
+ ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag)
: ProgrammablePassEncoder(device, encodingContext, errorTag),
mCommandEncoder(commandEncoder) {
}
- ComputePassEncoderBase* ComputePassEncoderBase::MakeError(DeviceBase* device,
- CommandEncoderBase* commandEncoder,
- EncodingContext* encodingContext) {
- return new ComputePassEncoderBase(device, commandEncoder, encodingContext,
- ObjectBase::kError);
+ ComputePassEncoder* ComputePassEncoder::MakeError(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext) {
+ return new ComputePassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError);
}
- void ComputePassEncoderBase::EndPass() {
+ void ComputePassEncoder::EndPass() {
if (mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
allocator->Allocate<EndComputePassCmd>(Command::EndComputePass);
return {};
})) {
- mEncodingContext->ExitPass(this);
+ mEncodingContext->ExitPass(this, mUsageTracker.AcquireResourceUsage());
}
}
- void ComputePassEncoderBase::Dispatch(uint32_t x, uint32_t y, uint32_t z) {
+ void ComputePassEncoder::Dispatch(uint32_t x, uint32_t y, uint32_t z) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
DispatchCmd* dispatch = allocator->Allocate<DispatchCmd>(Command::Dispatch);
dispatch->x = x;
@@ -64,8 +63,7 @@ namespace dawn_native {
});
}
- void ComputePassEncoderBase::DispatchIndirect(BufferBase* indirectBuffer,
- uint64_t indirectOffset) {
+ void ComputePassEncoder::DispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
@@ -79,11 +77,13 @@ namespace dawn_native {
dispatch->indirectBuffer = indirectBuffer;
dispatch->indirectOffset = indirectOffset;
+ mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+
return {};
});
}
- void ComputePassEncoderBase::SetPipeline(ComputePipelineBase* pipeline) {
+ void ComputePassEncoder::SetPipeline(ComputePipelineBase* pipeline) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
DAWN_TRY(GetDevice()->ValidateObject(pipeline));
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
index 8e4c2f68362..f790aad51ee 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
@@ -20,19 +20,15 @@
namespace dawn_native {
- // This is called ComputePassEncoderBase to match the code generator expectations. Note that it
- // is a pure frontend type to record in its parent CommandEncoder and never has a backend
- // implementation.
- // TODO(cwallez@chromium.org): Remove that generator limitation and rename to ComputePassEncoder
- class ComputePassEncoderBase : public ProgrammablePassEncoder {
+ class ComputePassEncoder final : public ProgrammablePassEncoder {
public:
- ComputePassEncoderBase(DeviceBase* device,
- CommandEncoderBase* commandEncoder,
- EncodingContext* encodingContext);
+ ComputePassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext);
- static ComputePassEncoderBase* MakeError(DeviceBase* device,
- CommandEncoderBase* commandEncoder,
- EncodingContext* encodingContext);
+ static ComputePassEncoder* MakeError(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext);
void EndPass();
@@ -41,15 +37,15 @@ namespace dawn_native {
void SetPipeline(ComputePipelineBase* pipeline);
protected:
- ComputePassEncoderBase(DeviceBase* device,
- CommandEncoderBase* commandEncoder,
- EncodingContext* encodingContext,
- ErrorTag errorTag);
+ ComputePassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag);
private:
// For render and compute passes, the encoding context is borrowed from the command encoder.
// Keep a reference to the encoder to make sure the context isn't freed.
- Ref<CommandEncoderBase> mCommandEncoder;
+ Ref<CommandEncoder> mCommandEncoder;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
index 6b207066a00..c1394c68f9d 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.cpp
@@ -25,7 +25,10 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
}
- DAWN_TRY(device->ValidateObject(descriptor->layout));
+ if (descriptor->layout != nullptr) {
+ DAWN_TRY(device->ValidateObject(descriptor->layout));
+ }
+
DAWN_TRY(ValidateProgrammableStageDescriptor(
device, &descriptor->computeStage, descriptor->layout, SingleShaderStage::Compute));
return {};
@@ -34,12 +37,10 @@ namespace dawn_native {
// ComputePipelineBase
ComputePipelineBase::ComputePipelineBase(DeviceBase* device,
- const ComputePipelineDescriptor* descriptor,
- bool blueprint)
- : PipelineBase(device, descriptor->layout, dawn::ShaderStage::Compute),
+ const ComputePipelineDescriptor* descriptor)
+ : PipelineBase(device, descriptor->layout, wgpu::ShaderStage::Compute),
mModule(descriptor->computeStage.module),
- mEntryPoint(descriptor->computeStage.entryPoint),
- mIsBlueprint(blueprint) {
+ mEntryPoint(descriptor->computeStage.entryPoint) {
}
ComputePipelineBase::ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
@@ -48,7 +49,7 @@ namespace dawn_native {
ComputePipelineBase::~ComputePipelineBase() {
// Do not uncache the actual cached object if we are a blueprint
- if (!mIsBlueprint && !IsError()) {
+ if (IsCachedReference()) {
GetDevice()->UncacheComputePipeline(this);
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.h b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.h
index 006c469d96a..43d7966568d 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePipeline.h
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePipeline.h
@@ -26,9 +26,7 @@ namespace dawn_native {
class ComputePipelineBase : public PipelineBase {
public:
- ComputePipelineBase(DeviceBase* device,
- const ComputePipelineDescriptor* descriptor,
- bool blueprint = false);
+ ComputePipelineBase(DeviceBase* device, const ComputePipelineDescriptor* descriptor);
~ComputePipelineBase() override;
static ComputePipelineBase* MakeError(DeviceBase* device);
@@ -47,7 +45,6 @@ namespace dawn_native {
// TODO(cwallez@chromium.org): Store a crypto hash of the module instead.
Ref<ShaderModuleBase> mModule;
std::string mEntryPoint;
- bool mIsBlueprint = false;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
index e63e3b8efd5..45fbb55ad9e 100644
--- a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
@@ -27,7 +27,7 @@ namespace dawn_native {
return GetProcsAutogen();
}
- std::vector<const char*> GetTogglesUsed(DawnDevice device) {
+ std::vector<const char*> GetTogglesUsed(WGPUDevice device) {
const dawn_native::DeviceBase* deviceBase =
reinterpret_cast<const dawn_native::DeviceBase*>(device);
return deviceBase->GetTogglesUsed();
@@ -61,12 +61,16 @@ namespace dawn_native {
return supportedExtensionsSet.GetEnabledExtensionNames();
}
+ WGPUDeviceProperties Adapter::GetAdapterProperties() const {
+ return mImpl->GetAdapterProperties();
+ }
+
Adapter::operator bool() const {
return mImpl != nullptr;
}
- DawnDevice Adapter::CreateDevice(const DeviceDescriptor* deviceDescriptor) {
- return reinterpret_cast<DawnDevice>(mImpl->CreateDevice(deviceDescriptor));
+ WGPUDevice Adapter::CreateDevice(const DeviceDescriptor* deviceDescriptor) {
+ return reinterpret_cast<WGPUDevice>(mImpl->CreateDevice(deviceDescriptor));
}
// AdapterDiscoverOptionsBase
@@ -129,7 +133,7 @@ namespace dawn_native {
return mImpl->GetPlatform();
}
- size_t GetLazyClearCountForTesting(DawnDevice device) {
+ size_t GetLazyClearCountForTesting(WGPUDevice device) {
dawn_native::DeviceBase* deviceBase = reinterpret_cast<dawn_native::DeviceBase*>(device);
return deviceBase->GetLazyClearCountForTesting();
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.cpp b/chromium/third_party/dawn/src/dawn_native/Device.cpp
index bf36778d1ca..664b81d5912 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Device.cpp
@@ -94,16 +94,16 @@ namespace dawn_native {
ASSERT(mCaches->shaderModules.empty());
}
- void DeviceBase::HandleError(dawn::ErrorType type, const char* message) {
+ void DeviceBase::HandleError(wgpu::ErrorType type, const char* message) {
mCurrentErrorScope->HandleError(type, message);
}
- void DeviceBase::InjectError(dawn::ErrorType type, const char* message) {
+ void DeviceBase::InjectError(wgpu::ErrorType type, const char* message) {
if (ConsumedError(ValidateErrorType(type))) {
return;
}
- if (DAWN_UNLIKELY(type == dawn::ErrorType::NoError)) {
- HandleError(dawn::ErrorType::Validation, "Invalid injected error NoError");
+ if (DAWN_UNLIKELY(type == wgpu::ErrorType::NoError)) {
+ HandleError(wgpu::ErrorType::Validation, "Invalid injected error NoError");
return;
}
HandleError(type, message);
@@ -115,18 +115,18 @@ namespace dawn_native {
delete error;
}
- void DeviceBase::SetUncapturedErrorCallback(dawn::ErrorCallback callback, void* userdata) {
+ void DeviceBase::SetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata) {
mRootErrorScope->SetCallback(callback, userdata);
}
- void DeviceBase::PushErrorScope(dawn::ErrorFilter filter) {
+ void DeviceBase::PushErrorScope(wgpu::ErrorFilter filter) {
if (ConsumedError(ValidateErrorFilter(filter))) {
return;
}
mCurrentErrorScope = AcquireRef(new ErrorScope(filter, mCurrentErrorScope.Get()));
}
- bool DeviceBase::PopErrorScope(dawn::ErrorCallback callback, void* userdata) {
+ bool DeviceBase::PopErrorScope(wgpu::ErrorCallback callback, void* userdata) {
if (DAWN_UNLIKELY(mCurrentErrorScope.Get() == mRootErrorScope.Get())) {
return false;
}
@@ -168,7 +168,7 @@ namespace dawn_native {
return mFenceSignalTracker.get();
}
- ResultOrError<const Format*> DeviceBase::GetInternalFormat(dawn::TextureFormat format) const {
+ ResultOrError<const Format*> DeviceBase::GetInternalFormat(wgpu::TextureFormat format) const {
size_t index = ComputeFormatIndex(format);
if (index >= mFormatTable.size()) {
return DAWN_VALIDATION_ERROR("Unknown texture format");
@@ -182,7 +182,7 @@ namespace dawn_native {
return internalFormat;
}
- const Format& DeviceBase::GetValidInternalFormat(dawn::TextureFormat format) const {
+ const Format& DeviceBase::GetValidInternalFormat(wgpu::TextureFormat format) const {
size_t index = ComputeFormatIndex(format);
ASSERT(index < mFormatTable.size());
ASSERT(mFormatTable[index].isSupported);
@@ -191,7 +191,7 @@ namespace dawn_native {
ResultOrError<BindGroupLayoutBase*> DeviceBase::GetOrCreateBindGroupLayout(
const BindGroupLayoutDescriptor* descriptor) {
- BindGroupLayoutBase blueprint(this, descriptor, true);
+ BindGroupLayoutBase blueprint(this, descriptor);
auto iter = mCaches->bindGroupLayouts.find(&blueprint);
if (iter != mCaches->bindGroupLayouts.end()) {
@@ -201,18 +201,20 @@ namespace dawn_native {
BindGroupLayoutBase* backendObj;
DAWN_TRY_ASSIGN(backendObj, CreateBindGroupLayoutImpl(descriptor));
+ backendObj->SetIsCachedReference();
mCaches->bindGroupLayouts.insert(backendObj);
return backendObj;
}
void DeviceBase::UncacheBindGroupLayout(BindGroupLayoutBase* obj) {
+ ASSERT(obj->IsCachedReference());
size_t removedCount = mCaches->bindGroupLayouts.erase(obj);
ASSERT(removedCount == 1);
}
ResultOrError<ComputePipelineBase*> DeviceBase::GetOrCreateComputePipeline(
const ComputePipelineDescriptor* descriptor) {
- ComputePipelineBase blueprint(this, descriptor, true);
+ ComputePipelineBase blueprint(this, descriptor);
auto iter = mCaches->computePipelines.find(&blueprint);
if (iter != mCaches->computePipelines.end()) {
@@ -222,18 +224,20 @@ namespace dawn_native {
ComputePipelineBase* backendObj;
DAWN_TRY_ASSIGN(backendObj, CreateComputePipelineImpl(descriptor));
+ backendObj->SetIsCachedReference();
mCaches->computePipelines.insert(backendObj);
return backendObj;
}
void DeviceBase::UncacheComputePipeline(ComputePipelineBase* obj) {
+ ASSERT(obj->IsCachedReference());
size_t removedCount = mCaches->computePipelines.erase(obj);
ASSERT(removedCount == 1);
}
ResultOrError<PipelineLayoutBase*> DeviceBase::GetOrCreatePipelineLayout(
const PipelineLayoutDescriptor* descriptor) {
- PipelineLayoutBase blueprint(this, descriptor, true);
+ PipelineLayoutBase blueprint(this, descriptor);
auto iter = mCaches->pipelineLayouts.find(&blueprint);
if (iter != mCaches->pipelineLayouts.end()) {
@@ -243,18 +247,20 @@ namespace dawn_native {
PipelineLayoutBase* backendObj;
DAWN_TRY_ASSIGN(backendObj, CreatePipelineLayoutImpl(descriptor));
+ backendObj->SetIsCachedReference();
mCaches->pipelineLayouts.insert(backendObj);
return backendObj;
}
void DeviceBase::UncachePipelineLayout(PipelineLayoutBase* obj) {
+ ASSERT(obj->IsCachedReference());
size_t removedCount = mCaches->pipelineLayouts.erase(obj);
ASSERT(removedCount == 1);
}
ResultOrError<RenderPipelineBase*> DeviceBase::GetOrCreateRenderPipeline(
const RenderPipelineDescriptor* descriptor) {
- RenderPipelineBase blueprint(this, descriptor, true);
+ RenderPipelineBase blueprint(this, descriptor);
auto iter = mCaches->renderPipelines.find(&blueprint);
if (iter != mCaches->renderPipelines.end()) {
@@ -264,18 +270,20 @@ namespace dawn_native {
RenderPipelineBase* backendObj;
DAWN_TRY_ASSIGN(backendObj, CreateRenderPipelineImpl(descriptor));
+ backendObj->SetIsCachedReference();
mCaches->renderPipelines.insert(backendObj);
return backendObj;
}
void DeviceBase::UncacheRenderPipeline(RenderPipelineBase* obj) {
+ ASSERT(obj->IsCachedReference());
size_t removedCount = mCaches->renderPipelines.erase(obj);
ASSERT(removedCount == 1);
}
ResultOrError<SamplerBase*> DeviceBase::GetOrCreateSampler(
const SamplerDescriptor* descriptor) {
- SamplerBase blueprint(this, descriptor, true);
+ SamplerBase blueprint(this, descriptor);
auto iter = mCaches->samplers.find(&blueprint);
if (iter != mCaches->samplers.end()) {
@@ -285,18 +293,20 @@ namespace dawn_native {
SamplerBase* backendObj;
DAWN_TRY_ASSIGN(backendObj, CreateSamplerImpl(descriptor));
+ backendObj->SetIsCachedReference();
mCaches->samplers.insert(backendObj);
return backendObj;
}
void DeviceBase::UncacheSampler(SamplerBase* obj) {
+ ASSERT(obj->IsCachedReference());
size_t removedCount = mCaches->samplers.erase(obj);
ASSERT(removedCount == 1);
}
ResultOrError<ShaderModuleBase*> DeviceBase::GetOrCreateShaderModule(
const ShaderModuleDescriptor* descriptor) {
- ShaderModuleBase blueprint(this, descriptor, true);
+ ShaderModuleBase blueprint(this, descriptor);
auto iter = mCaches->shaderModules.find(&blueprint);
if (iter != mCaches->shaderModules.end()) {
@@ -306,11 +316,13 @@ namespace dawn_native {
ShaderModuleBase* backendObj;
DAWN_TRY_ASSIGN(backendObj, CreateShaderModuleImpl(descriptor));
+ backendObj->SetIsCachedReference();
mCaches->shaderModules.insert(backendObj);
return backendObj;
}
void DeviceBase::UncacheShaderModule(ShaderModuleBase* obj) {
+ ASSERT(obj->IsCachedReference());
size_t removedCount = mCaches->shaderModules.erase(obj);
ASSERT(removedCount == 1);
}
@@ -323,6 +335,7 @@ namespace dawn_native {
}
Ref<AttachmentState> attachmentState = AcquireRef(new AttachmentState(this, *blueprint));
+ attachmentState->SetIsCachedReference();
mCaches->attachmentStates.insert(attachmentState.Get());
return attachmentState;
}
@@ -346,6 +359,7 @@ namespace dawn_native {
}
void DeviceBase::UncacheAttachmentState(AttachmentState* obj) {
+ ASSERT(obj->IsCachedReference());
size_t removedCount = mCaches->attachmentStates.erase(obj);
ASSERT(removedCount == 1);
}
@@ -380,7 +394,7 @@ namespace dawn_native {
return result;
}
- DawnCreateBufferMappedResult DeviceBase::CreateBufferMapped(
+ WGPUCreateBufferMappedResult DeviceBase::CreateBufferMapped(
const BufferDescriptor* descriptor) {
BufferBase* buffer = nullptr;
uint8_t* data = nullptr;
@@ -405,21 +419,21 @@ namespace dawn_native {
memset(data, 0, size);
}
- DawnCreateBufferMappedResult result = {};
- result.buffer = reinterpret_cast<DawnBuffer>(buffer);
+ WGPUCreateBufferMappedResult result = {};
+ result.buffer = reinterpret_cast<WGPUBuffer>(buffer);
result.data = data;
result.dataLength = size;
return result;
}
void DeviceBase::CreateBufferMappedAsync(const BufferDescriptor* descriptor,
- dawn::BufferCreateMappedCallback callback,
+ wgpu::BufferCreateMappedCallback callback,
void* userdata) {
- DawnCreateBufferMappedResult result = CreateBufferMapped(descriptor);
+ WGPUCreateBufferMappedResult result = CreateBufferMapped(descriptor);
- DawnBufferMapAsyncStatus status = DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS;
+ WGPUBufferMapAsyncStatus status = WGPUBufferMapAsyncStatus_Success;
if (result.data == nullptr || result.dataLength != descriptor->size) {
- status = DAWN_BUFFER_MAP_ASYNC_STATUS_ERROR;
+ status = WGPUBufferMapAsyncStatus_Error;
}
DeferredCreateBufferMappedAsync deferred_info;
@@ -431,9 +445,8 @@ namespace dawn_native {
// The callback is deferred so it matches the async behavior of WebGPU.
mDeferredCreateBufferMappedAsyncResults.push_back(deferred_info);
}
- CommandEncoderBase* DeviceBase::CreateCommandEncoder(
- const CommandEncoderDescriptor* descriptor) {
- return new CommandEncoderBase(this, descriptor);
+ CommandEncoder* DeviceBase::CreateCommandEncoder(const CommandEncoderDescriptor* descriptor) {
+ return new CommandEncoder(this, descriptor);
}
ComputePipelineBase* DeviceBase::CreateComputePipeline(
const ComputePipelineDescriptor* descriptor) {
@@ -476,12 +489,12 @@ namespace dawn_native {
return result;
}
- RenderBundleEncoderBase* DeviceBase::CreateRenderBundleEncoder(
+ RenderBundleEncoder* DeviceBase::CreateRenderBundleEncoder(
const RenderBundleEncoderDescriptor* descriptor) {
- RenderBundleEncoderBase* result = nullptr;
+ RenderBundleEncoder* result = nullptr;
if (ConsumedError(CreateRenderBundleEncoderInternal(&result, descriptor))) {
- return RenderBundleEncoderBase::MakeError(this);
+ return RenderBundleEncoder::MakeError(this);
}
return result;
@@ -604,6 +617,10 @@ namespace dawn_native {
return mTogglesSet.IsEnabled(toggle);
}
+ bool DeviceBase::IsValidationEnabled() const {
+ return !IsToggleEnabled(Toggle::SkipValidation);
+ }
+
size_t DeviceBase::GetLazyClearCountForTesting() {
return mLazyClearCountForTesting;
}
@@ -621,7 +638,9 @@ namespace dawn_native {
MaybeError DeviceBase::CreateBindGroupInternal(BindGroupBase** result,
const BindGroupDescriptor* descriptor) {
- DAWN_TRY(ValidateBindGroupDescriptor(this, descriptor));
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateBindGroupDescriptor(this, descriptor));
+ }
DAWN_TRY_ASSIGN(*result, CreateBindGroupImpl(descriptor));
return {};
}
@@ -629,14 +648,18 @@ namespace dawn_native {
MaybeError DeviceBase::CreateBindGroupLayoutInternal(
BindGroupLayoutBase** result,
const BindGroupLayoutDescriptor* descriptor) {
- DAWN_TRY(ValidateBindGroupLayoutDescriptor(this, descriptor));
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateBindGroupLayoutDescriptor(this, descriptor));
+ }
DAWN_TRY_ASSIGN(*result, GetOrCreateBindGroupLayout(descriptor));
return {};
}
MaybeError DeviceBase::CreateBufferInternal(BufferBase** result,
const BufferDescriptor* descriptor) {
- DAWN_TRY(ValidateBufferDescriptor(this, descriptor));
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateBufferDescriptor(this, descriptor));
+ }
DAWN_TRY_ASSIGN(*result, CreateBufferImpl(descriptor));
return {};
}
@@ -644,15 +667,33 @@ namespace dawn_native {
MaybeError DeviceBase::CreateComputePipelineInternal(
ComputePipelineBase** result,
const ComputePipelineDescriptor* descriptor) {
- DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
- DAWN_TRY_ASSIGN(*result, GetOrCreateComputePipeline(descriptor));
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
+ }
+
+ if (descriptor->layout == nullptr) {
+ ComputePipelineDescriptor descriptorWithDefaultLayout = *descriptor;
+
+ DAWN_TRY_ASSIGN(
+ descriptorWithDefaultLayout.layout,
+ PipelineLayoutBase::CreateDefault(this, &descriptor->computeStage.module, 1));
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ Ref<PipelineLayoutBase> layoutRef = AcquireRef(descriptorWithDefaultLayout.layout);
+
+ DAWN_TRY_ASSIGN(*result, GetOrCreateComputePipeline(&descriptorWithDefaultLayout));
+ } else {
+ DAWN_TRY_ASSIGN(*result, GetOrCreateComputePipeline(descriptor));
+ }
return {};
}
MaybeError DeviceBase::CreatePipelineLayoutInternal(
PipelineLayoutBase** result,
const PipelineLayoutDescriptor* descriptor) {
- DAWN_TRY(ValidatePipelineLayoutDescriptor(this, descriptor));
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidatePipelineLayoutDescriptor(this, descriptor));
+ }
DAWN_TRY_ASSIGN(*result, GetOrCreatePipelineLayout(descriptor));
return {};
}
@@ -663,45 +704,80 @@ namespace dawn_native {
}
MaybeError DeviceBase::CreateRenderBundleEncoderInternal(
- RenderBundleEncoderBase** result,
+ RenderBundleEncoder** result,
const RenderBundleEncoderDescriptor* descriptor) {
- DAWN_TRY(ValidateRenderBundleEncoderDescriptor(this, descriptor));
- *result = new RenderBundleEncoderBase(this, descriptor);
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateRenderBundleEncoderDescriptor(this, descriptor));
+ }
+ *result = new RenderBundleEncoder(this, descriptor);
return {};
}
MaybeError DeviceBase::CreateRenderPipelineInternal(
RenderPipelineBase** result,
const RenderPipelineDescriptor* descriptor) {
- DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
- DAWN_TRY_ASSIGN(*result, GetOrCreateRenderPipeline(descriptor));
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
+ }
+
+ if (descriptor->layout == nullptr) {
+ RenderPipelineDescriptor descriptorWithDefaultLayout = *descriptor;
+
+ const ShaderModuleBase* modules[2];
+ modules[0] = descriptor->vertexStage.module;
+ uint32_t count;
+ if (descriptor->fragmentStage == nullptr) {
+ count = 1;
+ } else {
+ modules[1] = descriptor->fragmentStage->module;
+ count = 2;
+ }
+
+ DAWN_TRY_ASSIGN(descriptorWithDefaultLayout.layout,
+ PipelineLayoutBase::CreateDefault(this, modules, count));
+ // Ref will keep the pipeline layout alive until the end of the function where
+ // the pipeline will take another reference.
+ Ref<PipelineLayoutBase> layoutRef = AcquireRef(descriptorWithDefaultLayout.layout);
+
+ DAWN_TRY_ASSIGN(*result, GetOrCreateRenderPipeline(&descriptorWithDefaultLayout));
+ } else {
+ DAWN_TRY_ASSIGN(*result, GetOrCreateRenderPipeline(descriptor));
+ }
return {};
}
MaybeError DeviceBase::CreateSamplerInternal(SamplerBase** result,
const SamplerDescriptor* descriptor) {
- DAWN_TRY(ValidateSamplerDescriptor(this, descriptor));
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateSamplerDescriptor(this, descriptor));
+ }
DAWN_TRY_ASSIGN(*result, GetOrCreateSampler(descriptor));
return {};
}
MaybeError DeviceBase::CreateShaderModuleInternal(ShaderModuleBase** result,
const ShaderModuleDescriptor* descriptor) {
- DAWN_TRY(ValidateShaderModuleDescriptor(this, descriptor));
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateShaderModuleDescriptor(this, descriptor));
+ }
DAWN_TRY_ASSIGN(*result, GetOrCreateShaderModule(descriptor));
return {};
}
MaybeError DeviceBase::CreateSwapChainInternal(SwapChainBase** result,
const SwapChainDescriptor* descriptor) {
- DAWN_TRY(ValidateSwapChainDescriptor(this, descriptor));
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateSwapChainDescriptor(this, descriptor));
+ }
DAWN_TRY_ASSIGN(*result, CreateSwapChainImpl(descriptor));
return {};
}
MaybeError DeviceBase::CreateTextureInternal(TextureBase** result,
const TextureDescriptor* descriptor) {
- DAWN_TRY(ValidateTextureDescriptor(this, descriptor));
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateTextureDescriptor(this, descriptor));
+ }
DAWN_TRY_ASSIGN(*result, CreateTextureImpl(descriptor));
return {};
}
@@ -711,7 +787,9 @@ namespace dawn_native {
const TextureViewDescriptor* descriptor) {
DAWN_TRY(ValidateObject(texture));
TextureViewDescriptor desc = GetTextureViewDescriptorWithDefaults(texture, descriptor);
- DAWN_TRY(ValidateTextureViewDescriptor(texture, &desc));
+ if (IsValidationEnabled()) {
+ DAWN_TRY(ValidateTextureViewDescriptor(texture, &desc));
+ }
DAWN_TRY_ASSIGN(*result, CreateTextureViewImpl(texture, &desc));
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.h b/chromium/third_party/dawn/src/dawn_native/Device.h
index 85ca254e05c..e887fa7c1a3 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.h
+++ b/chromium/third_party/dawn/src/dawn_native/Device.h
@@ -46,7 +46,7 @@ namespace dawn_native {
DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor);
virtual ~DeviceBase();
- void HandleError(dawn::ErrorType type, const char* message);
+ void HandleError(wgpu::ErrorType type, const char* message);
bool ConsumedError(MaybeError maybeError) {
if (DAWN_UNLIKELY(maybeError.IsError())) {
@@ -74,18 +74,18 @@ namespace dawn_native {
ErrorScopeTracker* GetErrorScopeTracker() const;
FenceSignalTracker* GetFenceSignalTracker() const;
- // Returns the Format corresponding to the dawn::TextureFormat or an error if the format
- // isn't a valid dawn::TextureFormat or isn't supported by this device.
+ // Returns the Format corresponding to the wgpu::TextureFormat or an error if the format
+ // isn't a valid wgpu::TextureFormat or isn't supported by this device.
// The pointer returned has the same lifetime as the device.
- ResultOrError<const Format*> GetInternalFormat(dawn::TextureFormat format) const;
+ ResultOrError<const Format*> GetInternalFormat(wgpu::TextureFormat format) const;
- // Returns the Format corresponding to the dawn::TextureFormat and assumes the format is
+ // Returns the Format corresponding to the wgpu::TextureFormat and assumes the format is
// valid and supported.
// The reference returned has the same lifetime as the device.
- const Format& GetValidInternalFormat(dawn::TextureFormat format) const;
+ const Format& GetValidInternalFormat(wgpu::TextureFormat format) const;
virtual CommandBufferBase* CreateCommandBuffer(
- CommandEncoderBase* encoder,
+ CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) = 0;
virtual Serial GetCompletedCommandSerial() const = 0;
@@ -141,15 +141,15 @@ namespace dawn_native {
BindGroupBase* CreateBindGroup(const BindGroupDescriptor* descriptor);
BindGroupLayoutBase* CreateBindGroupLayout(const BindGroupLayoutDescriptor* descriptor);
BufferBase* CreateBuffer(const BufferDescriptor* descriptor);
- DawnCreateBufferMappedResult CreateBufferMapped(const BufferDescriptor* descriptor);
+ WGPUCreateBufferMappedResult CreateBufferMapped(const BufferDescriptor* descriptor);
void CreateBufferMappedAsync(const BufferDescriptor* descriptor,
- dawn::BufferCreateMappedCallback callback,
+ wgpu::BufferCreateMappedCallback callback,
void* userdata);
- CommandEncoderBase* CreateCommandEncoder(const CommandEncoderDescriptor* descriptor);
+ CommandEncoder* CreateCommandEncoder(const CommandEncoderDescriptor* descriptor);
ComputePipelineBase* CreateComputePipeline(const ComputePipelineDescriptor* descriptor);
PipelineLayoutBase* CreatePipelineLayout(const PipelineLayoutDescriptor* descriptor);
QueueBase* CreateQueue();
- RenderBundleEncoderBase* CreateRenderBundleEncoder(
+ RenderBundleEncoder* CreateRenderBundleEncoder(
const RenderBundleEncoderDescriptor* descriptor);
RenderPipelineBase* CreateRenderPipeline(const RenderPipelineDescriptor* descriptor);
SamplerBase* CreateSampler(const SamplerDescriptor* descriptor);
@@ -159,13 +159,13 @@ namespace dawn_native {
TextureViewBase* CreateTextureView(TextureBase* texture,
const TextureViewDescriptor* descriptor);
- void InjectError(dawn::ErrorType type, const char* message);
+ void InjectError(wgpu::ErrorType type, const char* message);
void Tick();
- void SetUncapturedErrorCallback(dawn::ErrorCallback callback, void* userdata);
- void PushErrorScope(dawn::ErrorFilter filter);
- bool PopErrorScope(dawn::ErrorCallback callback, void* userdata);
+ void SetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata);
+ void PushErrorScope(wgpu::ErrorFilter filter);
+ bool PopErrorScope(wgpu::ErrorCallback callback, void* userdata);
ErrorScope* GetCurrentErrorScope();
void Reference();
@@ -185,6 +185,7 @@ namespace dawn_native {
std::vector<const char*> GetTogglesUsed() const;
bool IsExtensionEnabled(Extension extension) const;
bool IsToggleEnabled(Toggle toggle) const;
+ bool IsValidationEnabled() const;
size_t GetLazyClearCountForTesting();
void IncrementLazyClearCountForTesting();
@@ -230,7 +231,7 @@ namespace dawn_native {
const PipelineLayoutDescriptor* descriptor);
MaybeError CreateQueueInternal(QueueBase** result);
MaybeError CreateRenderBundleEncoderInternal(
- RenderBundleEncoderBase** result,
+ RenderBundleEncoder** result,
const RenderBundleEncoderDescriptor* descriptor);
MaybeError CreateRenderPipelineInternal(RenderPipelineBase** result,
const RenderPipelineDescriptor* descriptor);
@@ -261,9 +262,9 @@ namespace dawn_native {
std::unique_ptr<Caches> mCaches;
struct DeferredCreateBufferMappedAsync {
- dawn::BufferCreateMappedCallback callback;
- DawnBufferMapAsyncStatus status;
- DawnCreateBufferMappedResult result;
+ wgpu::BufferCreateMappedCallback callback;
+ WGPUBufferMapAsyncStatus status;
+ WGPUCreateBufferMappedResult result;
void* userdata;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp b/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp
index 876b7896689..be92ea09591 100644
--- a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp
@@ -18,9 +18,9 @@
namespace dawn_native {
- DynamicUploader::DynamicUploader(DeviceBase* device, size_t size) : mDevice(device) {
- mRingBuffers.emplace_back(
- std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, RingBufferAllocator(size)}));
+ DynamicUploader::DynamicUploader(DeviceBase* device) : mDevice(device) {
+ mRingBuffers.emplace_back(std::unique_ptr<RingBuffer>(
+ new RingBuffer{nullptr, RingBufferAllocator(kRingBufferSize)}));
}
void DynamicUploader::ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer) {
@@ -28,7 +28,20 @@ namespace dawn_native {
mDevice->GetPendingCommandSerial());
}
- ResultOrError<UploadHandle> DynamicUploader::Allocate(size_t allocationSize, Serial serial) {
+ ResultOrError<UploadHandle> DynamicUploader::Allocate(uint64_t allocationSize, Serial serial) {
+ // Disable further sub-allocation should the request be too large.
+ if (allocationSize > kRingBufferSize) {
+ std::unique_ptr<StagingBufferBase> stagingBuffer;
+ DAWN_TRY_ASSIGN(stagingBuffer, mDevice->CreateStagingBuffer(allocationSize));
+
+ UploadHandle uploadHandle;
+ uploadHandle.mappedBuffer = static_cast<uint8_t*>(stagingBuffer->GetMappedPointer());
+ uploadHandle.stagingBuffer = stagingBuffer.get();
+
+ ReleaseStagingBuffer(std::move(stagingBuffer));
+ return uploadHandle;
+ }
+
// Note: Validation ensures size is already aligned.
// First-fit: find next smallest buffer large enough to satisfy the allocation request.
RingBuffer* targetRingBuffer = mRingBuffers.back().get();
@@ -36,7 +49,7 @@ namespace dawn_native {
const RingBufferAllocator& ringBufferAllocator = ringBuffer->mAllocator;
// Prevent overflow.
ASSERT(ringBufferAllocator.GetSize() >= ringBufferAllocator.GetUsedSize());
- const size_t remainingSize =
+ const uint64_t remainingSize =
ringBufferAllocator.GetSize() - ringBufferAllocator.GetUsedSize();
if (allocationSize <= remainingSize) {
targetRingBuffer = ringBuffer.get();
@@ -44,23 +57,16 @@ namespace dawn_native {
}
}
- size_t startOffset = RingBufferAllocator::kInvalidOffset;
+ uint64_t startOffset = RingBufferAllocator::kInvalidOffset;
if (targetRingBuffer != nullptr) {
startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
}
- // Upon failure, append a newly created (and much larger) ring buffer to fulfill the
+ // Upon failure, append a newly created ring buffer to fulfill the
// request.
if (startOffset == RingBufferAllocator::kInvalidOffset) {
- // Compute the new max size (in powers of two to preserve alignment).
- size_t newMaxSize = targetRingBuffer->mAllocator.GetSize() * 2;
- while (newMaxSize < allocationSize) {
- newMaxSize *= 2;
- }
-
- // TODO(bryan.bernhart@intel.com): Fall-back to no sub-allocations should this fail.
mRingBuffers.emplace_back(std::unique_ptr<RingBuffer>(
- new RingBuffer{nullptr, RingBufferAllocator(newMaxSize)}));
+ new RingBuffer{nullptr, RingBufferAllocator(kRingBufferSize)}));
targetRingBuffer = mRingBuffers.back().get();
startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
diff --git a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h b/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h
index f0d4510f153..8210b035b22 100644
--- a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h
+++ b/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h
@@ -25,13 +25,13 @@ namespace dawn_native {
struct UploadHandle {
uint8_t* mappedBuffer = nullptr;
- size_t startOffset = 0;
+ uint64_t startOffset = 0;
StagingBufferBase* stagingBuffer = nullptr;
};
class DynamicUploader {
public:
- DynamicUploader(DeviceBase* device, size_t size = kBaseUploadBufferSize);
+ DynamicUploader(DeviceBase* device);
~DynamicUploader() = default;
// We add functions to Release StagingBuffers to the DynamicUploader as there's
@@ -40,12 +40,11 @@ namespace dawn_native {
// implemented.
void ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer);
- ResultOrError<UploadHandle> Allocate(size_t allocationSize, Serial serial);
+ ResultOrError<UploadHandle> Allocate(uint64_t allocationSize, Serial serial);
void Deallocate(Serial lastCompletedSerial);
private:
- // TODO(bryan.bernhart@intel.com): Figure out this value.
- static constexpr size_t kBaseUploadBufferSize = 64000;
+ static constexpr uint64_t kRingBufferSize = 4 * 1024 * 1024;
struct RingBuffer {
std::unique_ptr<StagingBufferBase> mStagingBuffer;
diff --git a/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp b/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp
index 121a890992d..b8be06990d6 100644
--- a/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/EncodingContext.cpp
@@ -15,9 +15,11 @@
#include "dawn_native/EncodingContext.h"
#include "common/Assert.h"
+#include "dawn_native/CommandEncoder.h"
#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
#include "dawn_native/ErrorData.h"
+#include "dawn_native/RenderBundleEncoder.h"
namespace dawn_native {
@@ -32,20 +34,26 @@ namespace dawn_native {
}
CommandIterator EncodingContext::AcquireCommands() {
+ MoveToIterator();
ASSERT(!mWereCommandsAcquired);
mWereCommandsAcquired = true;
return std::move(mIterator);
}
CommandIterator* EncodingContext::GetIterator() {
+ MoveToIterator();
+ ASSERT(!mWereCommandsAcquired);
+ return &mIterator;
+ }
+
+ void EncodingContext::MoveToIterator() {
if (!mWasMovedToIterator) {
mIterator = std::move(mAllocator);
mWasMovedToIterator = true;
}
- return &mIterator;
}
- void EncodingContext::HandleError(dawn::ErrorType type, const char* message) {
+ void EncodingContext::HandleError(wgpu::ErrorType type, const char* message) {
if (!IsFinished()) {
// If the encoding context is not finished, errors are deferred until
// Finish() is called.
@@ -66,13 +74,25 @@ namespace dawn_native {
mCurrentEncoder = passEncoder;
}
- void EncodingContext::ExitPass(const ObjectBase* passEncoder) {
+ void EncodingContext::ExitPass(const ObjectBase* passEncoder, PassResourceUsage passUsage) {
// Assert we're not at the top level.
ASSERT(mCurrentEncoder != mTopLevelEncoder);
// Assert the pass encoder is current.
ASSERT(mCurrentEncoder == passEncoder);
mCurrentEncoder = mTopLevelEncoder;
+ mPassUsages.push_back(std::move(passUsage));
+ }
+
+ const PerPassUsages& EncodingContext::GetPassUsages() const {
+ ASSERT(!mWerePassUsagesAcquired);
+ return mPassUsages;
+ }
+
+ PerPassUsages EncodingContext::AcquirePassUsages() {
+ ASSERT(!mWerePassUsagesAcquired);
+ mWerePassUsagesAcquired = true;
+ return std::move(mPassUsages);
}
MaybeError EncodingContext::Finish() {
diff --git a/chromium/third_party/dawn/src/dawn_native/EncodingContext.h b/chromium/third_party/dawn/src/dawn_native/EncodingContext.h
index 831db7dcc24..c16d544c0fc 100644
--- a/chromium/third_party/dawn/src/dawn_native/EncodingContext.h
+++ b/chromium/third_party/dawn/src/dawn_native/EncodingContext.h
@@ -18,14 +18,15 @@
#include "dawn_native/CommandAllocator.h"
#include "dawn_native/Error.h"
#include "dawn_native/ErrorData.h"
+#include "dawn_native/PassResourceUsageTracker.h"
#include "dawn_native/dawn_platform.h"
#include <string>
namespace dawn_native {
- class ObjectBase;
class DeviceBase;
+ class ObjectBase;
// Base class for allocating/iterating commands.
// It performs error tracking as well as encoding state for render/compute passes.
@@ -38,7 +39,7 @@ namespace dawn_native {
CommandIterator* GetIterator();
// Functions to handle encoder errors
- void HandleError(dawn::ErrorType type, const char* message);
+ void HandleError(wgpu::ErrorType type, const char* message);
inline void ConsumeError(ErrorData* error) {
HandleError(error->GetType(), error->GetMessage().c_str());
@@ -54,14 +55,14 @@ namespace dawn_native {
}
template <typename EncodeFunction>
- inline bool TryEncode(const void* encoder, EncodeFunction&& encodeFunction) {
+ inline bool TryEncode(const ObjectBase* encoder, EncodeFunction&& encodeFunction) {
if (DAWN_UNLIKELY(encoder != mCurrentEncoder)) {
if (mCurrentEncoder != mTopLevelEncoder) {
// The top level encoder was used when a pass encoder was current.
- HandleError(dawn::ErrorType::Validation,
+ HandleError(wgpu::ErrorType::Validation,
"Command cannot be recorded inside a pass");
} else {
- HandleError(dawn::ErrorType::Validation,
+ HandleError(wgpu::ErrorType::Validation,
"Recording in an error or already ended pass encoder");
}
return false;
@@ -72,11 +73,15 @@ namespace dawn_native {
// Functions to set current encoder state
void EnterPass(const ObjectBase* passEncoder);
- void ExitPass(const ObjectBase* passEncoder);
+ void ExitPass(const ObjectBase* passEncoder, PassResourceUsage passUsages);
MaybeError Finish();
+ const PerPassUsages& GetPassUsages() const;
+ PerPassUsages AcquirePassUsages();
+
private:
bool IsFinished() const;
+ void MoveToIterator();
DeviceBase* mDevice;
@@ -90,6 +95,9 @@ namespace dawn_native {
// CommandEncoder::Begin/EndPass.
const ObjectBase* mCurrentEncoder;
+ PerPassUsages mPassUsages;
+ bool mWerePassUsagesAcquired = false;
+
CommandAllocator mAllocator;
CommandIterator mIterator;
bool mWasMovedToIterator = false;
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp b/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp
index 06be01e9899..2cd01da2773 100644
--- a/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorData.cpp
@@ -38,16 +38,16 @@ namespace dawn_native {
return mType;
}
- dawn::ErrorType ErrorData::GetType() const {
+ wgpu::ErrorType ErrorData::GetType() const {
switch (mType) {
case InternalErrorType::Validation:
- return dawn::ErrorType::Validation;
+ return wgpu::ErrorType::Validation;
case InternalErrorType::OutOfMemory:
- return dawn::ErrorType::OutOfMemory;
+ return wgpu::ErrorType::OutOfMemory;
case InternalErrorType::DeviceLost:
- return dawn::ErrorType::DeviceLost;
+ return wgpu::ErrorType::DeviceLost;
default:
- return dawn::ErrorType::Unknown;
+ return wgpu::ErrorType::Unknown;
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorData.h b/chromium/third_party/dawn/src/dawn_native/ErrorData.h
index 8c56b7ab5a5..a73d90dd234 100644
--- a/chromium/third_party/dawn/src/dawn_native/ErrorData.h
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorData.h
@@ -19,10 +19,14 @@
#include <string>
#include <vector>
-namespace dawn {
+namespace wgpu {
enum class ErrorType : uint32_t;
}
+namespace dawn {
+ using ErrorType = wgpu::ErrorType;
+}
+
namespace dawn_native {
enum class InternalErrorType : uint32_t;
@@ -40,7 +44,7 @@ namespace dawn_native {
void AppendBacktrace(const char* file, const char* function, int line);
InternalErrorType GetInternalType() const;
- dawn::ErrorType GetType() const;
+ wgpu::ErrorType GetType() const;
const std::string& GetMessage() const;
const std::vector<BacktraceRecord>& GetBacktrace() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp b/chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp
index 1758ef71b3b..2facb8be5cc 100644
--- a/chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorScope.cpp
@@ -20,7 +20,7 @@ namespace dawn_native {
ErrorScope::ErrorScope() = default;
- ErrorScope::ErrorScope(dawn::ErrorFilter errorFilter, ErrorScope* parent)
+ ErrorScope::ErrorScope(wgpu::ErrorFilter errorFilter, ErrorScope* parent)
: RefCounted(), mErrorFilter(errorFilter), mParent(parent) {
ASSERT(mParent.Get() != nullptr);
}
@@ -29,10 +29,10 @@ namespace dawn_native {
if (mCallback == nullptr || IsRoot()) {
return;
}
- mCallback(static_cast<DawnErrorType>(mErrorType), mErrorMessage.c_str(), mUserdata);
+ mCallback(static_cast<WGPUErrorType>(mErrorType), mErrorMessage.c_str(), mUserdata);
}
- void ErrorScope::SetCallback(dawn::ErrorCallback callback, void* userdata) {
+ void ErrorScope::SetCallback(wgpu::ErrorCallback callback, void* userdata) {
mCallback = callback;
mUserdata = userdata;
}
@@ -45,28 +45,28 @@ namespace dawn_native {
return mParent.Get() == nullptr;
}
- void ErrorScope::HandleError(dawn::ErrorType type, const char* message) {
+ void ErrorScope::HandleError(wgpu::ErrorType type, const char* message) {
HandleErrorImpl(this, type, message);
}
// static
- void ErrorScope::HandleErrorImpl(ErrorScope* scope, dawn::ErrorType type, const char* message) {
+ void ErrorScope::HandleErrorImpl(ErrorScope* scope, wgpu::ErrorType type, const char* message) {
ErrorScope* currentScope = scope;
for (; !currentScope->IsRoot(); currentScope = currentScope->GetParent()) {
ASSERT(currentScope != nullptr);
bool consumed = false;
switch (type) {
- case dawn::ErrorType::Validation:
- if (currentScope->mErrorFilter != dawn::ErrorFilter::Validation) {
+ case wgpu::ErrorType::Validation:
+ if (currentScope->mErrorFilter != wgpu::ErrorFilter::Validation) {
// Error filter does not match. Move on to the next scope.
continue;
}
consumed = true;
break;
- case dawn::ErrorType::OutOfMemory:
- if (currentScope->mErrorFilter != dawn::ErrorFilter::OutOfMemory) {
+ case wgpu::ErrorType::OutOfMemory:
+ if (currentScope->mErrorFilter != wgpu::ErrorFilter::OutOfMemory) {
// Error filter does not match. Move on to the next scope.
continue;
}
@@ -75,19 +75,19 @@ namespace dawn_native {
// Unknown and DeviceLost are fatal. All error scopes capture them.
// |consumed| is false because these should bubble to all scopes.
- case dawn::ErrorType::Unknown:
- case dawn::ErrorType::DeviceLost:
+ case wgpu::ErrorType::Unknown:
+ case wgpu::ErrorType::DeviceLost:
consumed = false;
break;
- case dawn::ErrorType::NoError:
+ case wgpu::ErrorType::NoError:
default:
UNREACHABLE();
return;
}
// Record the error if the scope doesn't have one yet.
- if (currentScope->mErrorType == dawn::ErrorType::NoError) {
+ if (currentScope->mErrorType == wgpu::ErrorType::NoError) {
currentScope->mErrorType = type;
currentScope->mErrorMessage = message;
}
@@ -100,14 +100,14 @@ namespace dawn_native {
// The root error scope captures all uncaptured errors.
ASSERT(currentScope->IsRoot());
if (currentScope->mCallback) {
- currentScope->mCallback(static_cast<DawnErrorType>(type), message,
+ currentScope->mCallback(static_cast<WGPUErrorType>(type), message,
currentScope->mUserdata);
}
}
void ErrorScope::Destroy() {
if (!IsRoot()) {
- mErrorType = dawn::ErrorType::Unknown;
+ mErrorType = wgpu::ErrorType::Unknown;
mErrorMessage = "Error scope destroyed";
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/ErrorScope.h b/chromium/third_party/dawn/src/dawn_native/ErrorScope.h
index f3218cb41ef..fb0bc67e3d3 100644
--- a/chromium/third_party/dawn/src/dawn_native/ErrorScope.h
+++ b/chromium/third_party/dawn/src/dawn_native/ErrorScope.h
@@ -38,27 +38,27 @@ namespace dawn_native {
class ErrorScope : public RefCounted {
public:
ErrorScope(); // Constructor for the root error scope.
- ErrorScope(dawn::ErrorFilter errorFilter, ErrorScope* parent);
+ ErrorScope(wgpu::ErrorFilter errorFilter, ErrorScope* parent);
~ErrorScope();
- void SetCallback(dawn::ErrorCallback callback, void* userdata);
+ void SetCallback(wgpu::ErrorCallback callback, void* userdata);
ErrorScope* GetParent();
- void HandleError(dawn::ErrorType type, const char* message);
+ void HandleError(wgpu::ErrorType type, const char* message);
void Destroy();
private:
bool IsRoot() const;
- static void HandleErrorImpl(ErrorScope* scope, dawn::ErrorType type, const char* message);
+ static void HandleErrorImpl(ErrorScope* scope, wgpu::ErrorType type, const char* message);
- dawn::ErrorFilter mErrorFilter = dawn::ErrorFilter::None;
+ wgpu::ErrorFilter mErrorFilter = wgpu::ErrorFilter::None;
Ref<ErrorScope> mParent = nullptr;
- dawn::ErrorCallback mCallback = nullptr;
+ wgpu::ErrorCallback mCallback = nullptr;
void* mUserdata = nullptr;
- dawn::ErrorType mErrorType = dawn::ErrorType::NoError;
+ wgpu::ErrorType mErrorType = wgpu::ErrorType::NoError;
std::string mErrorMessage = "";
};
diff --git a/chromium/third_party/dawn/src/dawn_native/Extensions.cpp b/chromium/third_party/dawn/src/dawn_native/Extensions.cpp
index 2de7a8511d7..a2b5a9dff16 100644
--- a/chromium/third_party/dawn/src/dawn_native/Extensions.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Extensions.cpp
@@ -24,6 +24,7 @@ namespace dawn_native {
struct ExtensionEnumAndInfo {
Extension extension;
ExtensionInfo info;
+ bool WGPUDeviceProperties::*memberInWGPUDeviceProperties;
};
using ExtensionEnumAndInfoList =
@@ -32,7 +33,8 @@ namespace dawn_native {
static constexpr ExtensionEnumAndInfoList kExtensionNameAndInfoList = {
{{Extension::TextureCompressionBC,
{"texture_compression_bc", "Support Block Compressed (BC) texture formats",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=42"}}}};
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=42"},
+ &WGPUDeviceProperties::textureCompressionBC}}};
} // anonymous namespace
@@ -60,6 +62,14 @@ namespace dawn_native {
return enabledExtensionNames;
}
+ void ExtensionsSet::InitializeDeviceProperties(WGPUDeviceProperties* properties) const {
+ ASSERT(properties != nullptr);
+
+ for (uint32_t i : IterateBitSet(extensionsBitSet)) {
+ properties->*(kExtensionNameAndInfoList[i].memberInWGPUDeviceProperties) = true;
+ }
+ }
+
const char* ExtensionEnumToName(Extension extension) {
ASSERT(extension != Extension::InvalidEnum);
diff --git a/chromium/third_party/dawn/src/dawn_native/Extensions.h b/chromium/third_party/dawn/src/dawn_native/Extensions.h
index 274096fe3ea..6e6d82d9f4d 100644
--- a/chromium/third_party/dawn/src/dawn_native/Extensions.h
+++ b/chromium/third_party/dawn/src/dawn_native/Extensions.h
@@ -39,6 +39,7 @@ namespace dawn_native {
void EnableExtension(Extension extension);
bool IsEnabled(Extension extension) const;
std::vector<const char*> GetEnabledExtensionNames() const;
+ void InitializeDeviceProperties(WGPUDeviceProperties* properties) const;
};
const char* ExtensionEnumToName(Extension extension);
diff --git a/chromium/third_party/dawn/src/dawn_native/Fence.cpp b/chromium/third_party/dawn/src/dawn_native/Fence.cpp
index 55b6b332c90..1ad89b95931 100644
--- a/chromium/third_party/dawn/src/dawn_native/Fence.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Fence.cpp
@@ -33,47 +33,47 @@ namespace dawn_native {
// Fence
- FenceBase::FenceBase(QueueBase* queue, const FenceDescriptor* descriptor)
+ Fence::Fence(QueueBase* queue, const FenceDescriptor* descriptor)
: ObjectBase(queue->GetDevice()),
mSignalValue(descriptor->initialValue),
mCompletedValue(descriptor->initialValue),
mQueue(queue) {
}
- FenceBase::FenceBase(DeviceBase* device, ObjectBase::ErrorTag tag) : ObjectBase(device, tag) {
+ Fence::Fence(DeviceBase* device, ObjectBase::ErrorTag tag) : ObjectBase(device, tag) {
}
- FenceBase::~FenceBase() {
+ Fence::~Fence() {
for (auto& request : mRequests.IterateAll()) {
ASSERT(!IsError());
- request.completionCallback(DAWN_FENCE_COMPLETION_STATUS_UNKNOWN, request.userdata);
+ request.completionCallback(WGPUFenceCompletionStatus_Unknown, request.userdata);
}
mRequests.Clear();
}
// static
- FenceBase* FenceBase::MakeError(DeviceBase* device) {
- return new FenceBase(device, ObjectBase::kError);
+ Fence* Fence::MakeError(DeviceBase* device) {
+ return new Fence(device, ObjectBase::kError);
}
- uint64_t FenceBase::GetCompletedValue() const {
+ uint64_t Fence::GetCompletedValue() const {
if (IsError()) {
return 0;
}
return mCompletedValue;
}
- void FenceBase::OnCompletion(uint64_t value,
- dawn::FenceOnCompletionCallback callback,
- void* userdata) {
+ void Fence::OnCompletion(uint64_t value,
+ wgpu::FenceOnCompletionCallback callback,
+ void* userdata) {
if (GetDevice()->ConsumedError(ValidateOnCompletion(value))) {
- callback(DAWN_FENCE_COMPLETION_STATUS_ERROR, userdata);
+ callback(WGPUFenceCompletionStatus_Error, userdata);
return;
}
ASSERT(!IsError());
if (value <= mCompletedValue) {
- callback(DAWN_FENCE_COMPLETION_STATUS_SUCCESS, userdata);
+ callback(WGPUFenceCompletionStatus_Success, userdata);
return;
}
@@ -83,35 +83,35 @@ namespace dawn_native {
mRequests.Enqueue(std::move(request), value);
}
- uint64_t FenceBase::GetSignaledValue() const {
+ uint64_t Fence::GetSignaledValue() const {
ASSERT(!IsError());
return mSignalValue;
}
- const QueueBase* FenceBase::GetQueue() const {
+ const QueueBase* Fence::GetQueue() const {
ASSERT(!IsError());
return mQueue.Get();
}
- void FenceBase::SetSignaledValue(uint64_t signalValue) {
+ void Fence::SetSignaledValue(uint64_t signalValue) {
ASSERT(!IsError());
ASSERT(signalValue > mSignalValue);
mSignalValue = signalValue;
}
- void FenceBase::SetCompletedValue(uint64_t completedValue) {
+ void Fence::SetCompletedValue(uint64_t completedValue) {
ASSERT(!IsError());
ASSERT(completedValue <= mSignalValue);
ASSERT(completedValue > mCompletedValue);
mCompletedValue = completedValue;
for (auto& request : mRequests.IterateUpTo(mCompletedValue)) {
- request.completionCallback(DAWN_FENCE_COMPLETION_STATUS_SUCCESS, request.userdata);
+ request.completionCallback(WGPUFenceCompletionStatus_Success, request.userdata);
}
mRequests.ClearUpTo(mCompletedValue);
}
- MaybeError FenceBase::ValidateOnCompletion(uint64_t value) const {
+ MaybeError Fence::ValidateOnCompletion(uint64_t value) const {
DAWN_TRY(GetDevice()->ValidateObject(this));
if (value > mSignalValue) {
return DAWN_VALIDATION_ERROR("Value greater than fence signaled value");
diff --git a/chromium/third_party/dawn/src/dawn_native/Fence.h b/chromium/third_party/dawn/src/dawn_native/Fence.h
index cd20d26a1ba..1211ecbf8fc 100644
--- a/chromium/third_party/dawn/src/dawn_native/Fence.h
+++ b/chromium/third_party/dawn/src/dawn_native/Fence.h
@@ -28,19 +28,19 @@ namespace dawn_native {
MaybeError ValidateFenceDescriptor(const FenceDescriptor* descriptor);
- class FenceBase : public ObjectBase {
+ class Fence final : public ObjectBase {
public:
- FenceBase(QueueBase* queue, const FenceDescriptor* descriptor);
- ~FenceBase();
+ Fence(QueueBase* queue, const FenceDescriptor* descriptor);
+ ~Fence();
- static FenceBase* MakeError(DeviceBase* device);
+ static Fence* MakeError(DeviceBase* device);
uint64_t GetSignaledValue() const;
const QueueBase* GetQueue() const;
// Dawn API
uint64_t GetCompletedValue() const;
- void OnCompletion(uint64_t value, dawn::FenceOnCompletionCallback callback, void* userdata);
+ void OnCompletion(uint64_t value, wgpu::FenceOnCompletionCallback callback, void* userdata);
protected:
friend class QueueBase;
@@ -49,12 +49,12 @@ namespace dawn_native {
void SetCompletedValue(uint64_t completedValue);
private:
- FenceBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ Fence(DeviceBase* device, ObjectBase::ErrorTag tag);
MaybeError ValidateOnCompletion(uint64_t value) const;
struct OnCompletionData {
- dawn::FenceOnCompletionCallback completionCallback = nullptr;
+ wgpu::FenceOnCompletionCallback completionCallback = nullptr;
void* userdata = nullptr;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.cpp b/chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.cpp
index 132ac9c4996..1daf10a9980 100644
--- a/chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.cpp
@@ -26,7 +26,7 @@ namespace dawn_native {
ASSERT(mFencesInFlight.Empty());
}
- void FenceSignalTracker::UpdateFenceOnComplete(FenceBase* fence, uint64_t value) {
+ void FenceSignalTracker::UpdateFenceOnComplete(Fence* fence, uint64_t value) {
// Because we currently only have a single queue, we can simply update
// the fence completed value once the last submitted serial has passed.
mFencesInFlight.Enqueue(FenceInFlight{fence, value},
diff --git a/chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.h b/chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.h
index d689277244e..53333e946b2 100644
--- a/chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/FenceSignalTracker.h
@@ -21,11 +21,11 @@
namespace dawn_native {
class DeviceBase;
- class FenceBase;
+ class Fence;
class FenceSignalTracker {
struct FenceInFlight {
- Ref<FenceBase> fence;
+ Ref<Fence> fence;
uint64_t value;
};
@@ -33,7 +33,7 @@ namespace dawn_native {
FenceSignalTracker(DeviceBase* device);
~FenceSignalTracker();
- void UpdateFenceOnComplete(FenceBase* fence, uint64_t value);
+ void UpdateFenceOnComplete(Fence* fence, uint64_t value);
void Tick(Serial finishedSerial);
diff --git a/chromium/third_party/dawn/src/dawn_native/Format.cpp b/chromium/third_party/dawn/src/dawn_native/Format.cpp
index c604a87d9c8..c5537848a56 100644
--- a/chromium/third_party/dawn/src/dawn_native/Format.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Format.cpp
@@ -22,6 +22,41 @@ namespace dawn_native {
// Format
+ // static
+ Format::Type Format::TextureComponentTypeToFormatType(
+ wgpu::TextureComponentType componentType) {
+ switch (componentType) {
+ case wgpu::TextureComponentType::Float:
+ case wgpu::TextureComponentType::Sint:
+ case wgpu::TextureComponentType::Uint:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ // Check that Type correctly mirrors TextureComponentType except for "Other".
+ static_assert(static_cast<Type>(wgpu::TextureComponentType::Float) == Type::Float, "");
+ static_assert(static_cast<Type>(wgpu::TextureComponentType::Sint) == Type::Sint, "");
+ static_assert(static_cast<Type>(wgpu::TextureComponentType::Uint) == Type::Uint, "");
+ return static_cast<Type>(componentType);
+ }
+
+ // static
+ wgpu::TextureComponentType Format::FormatTypeToTextureComponentType(Type type) {
+ switch (type) {
+ case Type::Float:
+ case Type::Sint:
+ case Type::Uint:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ // Check that Type correctly mirrors TextureComponentType except for "Other".
+ static_assert(static_cast<Type>(wgpu::TextureComponentType::Float) == Type::Float, "");
+ static_assert(static_cast<Type>(wgpu::TextureComponentType::Sint) == Type::Sint, "");
+ static_assert(static_cast<Type>(wgpu::TextureComponentType::Uint) == Type::Uint, "");
+ return static_cast<wgpu::TextureComponentType>(type);
+ }
+
bool Format::IsColor() const {
return aspect == Aspect::Color;
}
@@ -38,23 +73,13 @@ namespace dawn_native {
return aspect != Color;
}
- bool Format::HasComponentType(dawn::TextureComponentType componentType) const {
+ bool Format::HasComponentType(wgpu::TextureComponentType componentType) const {
// Depth stencil textures need to be special cased but we don't support sampling them yet.
if (aspect != Color) {
return false;
}
- // Check that Type is correctly mirrors TextureComponentType except for "Other".
- static_assert(static_cast<dawn::TextureComponentType>(Type::Float) ==
- dawn::TextureComponentType::Float,
- "");
- static_assert(
- static_cast<dawn::TextureComponentType>(Type::Sint) == dawn::TextureComponentType::Sint,
- "");
- static_assert(
- static_cast<dawn::TextureComponentType>(Type::Uint) == dawn::TextureComponentType::Uint,
- "");
- return static_cast<dawn::TextureComponentType>(type) == componentType;
+ return TextureComponentTypeToFormatType(componentType) == type;
}
size_t Format::GetIndex() const {
@@ -65,10 +90,10 @@ namespace dawn_native {
// For the enum for formats are packed but this might change when we have a broader extension
// mechanism for webgpu.h. Formats start at 1 because 0 is the undefined format.
- size_t ComputeFormatIndex(dawn::TextureFormat format) {
+ size_t ComputeFormatIndex(wgpu::TextureFormat format) {
// This takes advantage of overflows to make the index of TextureFormat::Undefined outside
// of the range of the FormatTable.
- static_assert(static_cast<uint32_t>(dawn::TextureFormat::Undefined) - 1 > kKnownFormatCount,
+ static_assert(static_cast<uint32_t>(wgpu::TextureFormat::Undefined) - 1 > kKnownFormatCount,
"");
return static_cast<size_t>(static_cast<uint32_t>(format) - 1);
}
@@ -92,7 +117,7 @@ namespace dawn_native {
formatsSet.set(index);
};
- auto AddColorFormat = [&AddFormat](dawn::TextureFormat format, bool renderable,
+ auto AddColorFormat = [&AddFormat](wgpu::TextureFormat format, bool renderable,
uint32_t byteSize, Type type) {
Format internalFormat;
internalFormat.format = format;
@@ -107,7 +132,7 @@ namespace dawn_native {
AddFormat(internalFormat);
};
- auto AddDepthStencilFormat = [&AddFormat](dawn::TextureFormat format, Format::Aspect aspect,
+ auto AddDepthStencilFormat = [&AddFormat](wgpu::TextureFormat format, Format::Aspect aspect,
uint32_t byteSize) {
Format internalFormat;
internalFormat.format = format;
@@ -122,7 +147,7 @@ namespace dawn_native {
AddFormat(internalFormat);
};
- auto AddCompressedFormat = [&AddFormat](dawn::TextureFormat format, uint32_t byteSize,
+ auto AddCompressedFormat = [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize,
uint32_t width, uint32_t height, bool isSupported) {
Format internalFormat;
internalFormat.format = format;
@@ -140,74 +165,74 @@ namespace dawn_native {
// clang-format off
// 1 byte color formats
- AddColorFormat(dawn::TextureFormat::R8Unorm, true, 1, Type::Float);
- AddColorFormat(dawn::TextureFormat::R8Snorm, false, 1, Type::Float);
- AddColorFormat(dawn::TextureFormat::R8Uint, true, 1, Type::Uint);
- AddColorFormat(dawn::TextureFormat::R8Sint, true, 1, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::R8Unorm, true, 1, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::R8Snorm, false, 1, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::R8Uint, true, 1, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::R8Sint, true, 1, Type::Sint);
// 2 bytes color formats
- AddColorFormat(dawn::TextureFormat::R16Uint, true, 2, Type::Uint);
- AddColorFormat(dawn::TextureFormat::R16Sint, true, 2, Type::Sint);
- AddColorFormat(dawn::TextureFormat::R16Float, true, 2, Type::Float);
- AddColorFormat(dawn::TextureFormat::RG8Unorm, true, 2, Type::Float);
- AddColorFormat(dawn::TextureFormat::RG8Snorm, false, 2, Type::Float);
- AddColorFormat(dawn::TextureFormat::RG8Uint, true, 2, Type::Uint);
- AddColorFormat(dawn::TextureFormat::RG8Sint, true, 2, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::R16Uint, true, 2, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::R16Sint, true, 2, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::R16Float, true, 2, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RG8Unorm, true, 2, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RG8Snorm, false, 2, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RG8Uint, true, 2, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::RG8Sint, true, 2, Type::Sint);
// 4 bytes color formats
- AddColorFormat(dawn::TextureFormat::R32Uint, true, 4, Type::Uint);
- AddColorFormat(dawn::TextureFormat::R32Sint, true, 4, Type::Sint);
- AddColorFormat(dawn::TextureFormat::R32Float, true, 4, Type::Float);
- AddColorFormat(dawn::TextureFormat::RG16Uint, true, 4, Type::Uint);
- AddColorFormat(dawn::TextureFormat::RG16Sint, true, 4, Type::Sint);
- AddColorFormat(dawn::TextureFormat::RG16Float, true, 4, Type::Float);
- AddColorFormat(dawn::TextureFormat::RGBA8Unorm, true, 4, Type::Float);
- AddColorFormat(dawn::TextureFormat::RGBA8UnormSrgb, true, 4, Type::Float);
- AddColorFormat(dawn::TextureFormat::RGBA8Snorm, false, 4, Type::Float);
- AddColorFormat(dawn::TextureFormat::RGBA8Uint, true, 4, Type::Uint);
- AddColorFormat(dawn::TextureFormat::RGBA8Sint, true, 4, Type::Sint);
- AddColorFormat(dawn::TextureFormat::BGRA8Unorm, true, 4, Type::Float);
- AddColorFormat(dawn::TextureFormat::BGRA8UnormSrgb, true, 4, Type::Float);
- AddColorFormat(dawn::TextureFormat::RGB10A2Unorm, true, 4, Type::Float);
-
- AddColorFormat(dawn::TextureFormat::RG11B10Float, false, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::R32Uint, true, 4, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::R32Sint, true, 4, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::R32Float, true, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RG16Uint, true, 4, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::RG16Sint, true, 4, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::RG16Float, true, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGBA8Unorm, true, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGBA8UnormSrgb, true, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGBA8Snorm, false, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGBA8Uint, true, 4, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::RGBA8Sint, true, 4, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::BGRA8Unorm, true, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::BGRA8UnormSrgb, true, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGB10A2Unorm, true, 4, Type::Float);
+
+ AddColorFormat(wgpu::TextureFormat::RG11B10Float, false, 4, Type::Float);
// 8 bytes color formats
- AddColorFormat(dawn::TextureFormat::RG32Uint, true, 8, Type::Uint);
- AddColorFormat(dawn::TextureFormat::RG32Sint, true, 8, Type::Sint);
- AddColorFormat(dawn::TextureFormat::RG32Float, true, 8, Type::Float);
- AddColorFormat(dawn::TextureFormat::RGBA16Uint, true, 8, Type::Uint);
- AddColorFormat(dawn::TextureFormat::RGBA16Sint, true, 8, Type::Sint);
- AddColorFormat(dawn::TextureFormat::RGBA16Float, true, 8, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RG32Uint, true, 8, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::RG32Sint, true, 8, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::RG32Float, true, 8, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGBA16Uint, true, 8, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::RGBA16Sint, true, 8, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::RGBA16Float, true, 8, Type::Float);
// 16 bytes color formats
- AddColorFormat(dawn::TextureFormat::RGBA32Uint, true, 16, Type::Uint);
- AddColorFormat(dawn::TextureFormat::RGBA32Sint, true, 16, Type::Sint);
- AddColorFormat(dawn::TextureFormat::RGBA32Float, true, 16, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGBA32Uint, true, 16, Type::Uint);
+ AddColorFormat(wgpu::TextureFormat::RGBA32Sint, true, 16, Type::Sint);
+ AddColorFormat(wgpu::TextureFormat::RGBA32Float, true, 16, Type::Float);
// Depth stencil formats
- AddDepthStencilFormat(dawn::TextureFormat::Depth32Float, Aspect::Depth, 4);
- AddDepthStencilFormat(dawn::TextureFormat::Depth24Plus, Aspect::Depth, 4);
+ AddDepthStencilFormat(wgpu::TextureFormat::Depth32Float, Aspect::Depth, 4);
+ AddDepthStencilFormat(wgpu::TextureFormat::Depth24Plus, Aspect::Depth, 4);
// TODO(cwallez@chromium.org): It isn't clear if this format should be copyable
// because its size isn't well defined, is it 4, 5 or 8?
- AddDepthStencilFormat(dawn::TextureFormat::Depth24PlusStencil8, Aspect::DepthStencil, 4);
+ AddDepthStencilFormat(wgpu::TextureFormat::Depth24PlusStencil8, Aspect::DepthStencil, 4);
// BC compressed formats
bool isBCFormatSupported = device->IsExtensionEnabled(Extension::TextureCompressionBC);
- AddCompressedFormat(dawn::TextureFormat::BC1RGBAUnorm, 8, 4, 4, isBCFormatSupported);
- AddCompressedFormat(dawn::TextureFormat::BC1RGBAUnormSrgb, 8, 4, 4, isBCFormatSupported);
- AddCompressedFormat(dawn::TextureFormat::BC4RSnorm, 8, 4, 4, isBCFormatSupported);
- AddCompressedFormat(dawn::TextureFormat::BC4RUnorm, 8, 4, 4, isBCFormatSupported);
- AddCompressedFormat(dawn::TextureFormat::BC2RGBAUnorm, 16, 4, 4, isBCFormatSupported);
- AddCompressedFormat(dawn::TextureFormat::BC2RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported);
- AddCompressedFormat(dawn::TextureFormat::BC3RGBAUnorm, 16, 4, 4, isBCFormatSupported);
- AddCompressedFormat(dawn::TextureFormat::BC3RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported);
- AddCompressedFormat(dawn::TextureFormat::BC5RGSnorm, 16, 4, 4, isBCFormatSupported);
- AddCompressedFormat(dawn::TextureFormat::BC5RGUnorm, 16, 4, 4, isBCFormatSupported);
- AddCompressedFormat(dawn::TextureFormat::BC6HRGBSfloat, 16, 4, 4, isBCFormatSupported);
- AddCompressedFormat(dawn::TextureFormat::BC6HRGBUfloat, 16, 4, 4, isBCFormatSupported);
- AddCompressedFormat(dawn::TextureFormat::BC7RGBAUnorm, 16, 4, 4, isBCFormatSupported);
- AddCompressedFormat(dawn::TextureFormat::BC7RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported);
+ AddCompressedFormat(wgpu::TextureFormat::BC1RGBAUnorm, 8, 4, 4, isBCFormatSupported);
+ AddCompressedFormat(wgpu::TextureFormat::BC1RGBAUnormSrgb, 8, 4, 4, isBCFormatSupported);
+ AddCompressedFormat(wgpu::TextureFormat::BC4RSnorm, 8, 4, 4, isBCFormatSupported);
+ AddCompressedFormat(wgpu::TextureFormat::BC4RUnorm, 8, 4, 4, isBCFormatSupported);
+ AddCompressedFormat(wgpu::TextureFormat::BC2RGBAUnorm, 16, 4, 4, isBCFormatSupported);
+ AddCompressedFormat(wgpu::TextureFormat::BC2RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported);
+ AddCompressedFormat(wgpu::TextureFormat::BC3RGBAUnorm, 16, 4, 4, isBCFormatSupported);
+ AddCompressedFormat(wgpu::TextureFormat::BC3RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported);
+ AddCompressedFormat(wgpu::TextureFormat::BC5RGSnorm, 16, 4, 4, isBCFormatSupported);
+ AddCompressedFormat(wgpu::TextureFormat::BC5RGUnorm, 16, 4, 4, isBCFormatSupported);
+ AddCompressedFormat(wgpu::TextureFormat::BC6HRGBSfloat, 16, 4, 4, isBCFormatSupported);
+ AddCompressedFormat(wgpu::TextureFormat::BC6HRGBUfloat, 16, 4, 4, isBCFormatSupported);
+ AddCompressedFormat(wgpu::TextureFormat::BC7RGBAUnorm, 16, 4, 4, isBCFormatSupported);
+ AddCompressedFormat(wgpu::TextureFormat::BC7RGBAUnormSrgb, 16, 4, 4, isBCFormatSupported);
// clang-format on
diff --git a/chromium/third_party/dawn/src/dawn_native/Format.h b/chromium/third_party/dawn/src/dawn_native/Format.h
index 4ec19b8b918..f1e4fbb4874 100644
--- a/chromium/third_party/dawn/src/dawn_native/Format.h
+++ b/chromium/third_party/dawn/src/dawn_native/Format.h
@@ -29,7 +29,7 @@ namespace dawn_native {
// exact number of known format.
static constexpr size_t kKnownFormatCount = 52;
- // A dawn::TextureFormat along with all the information about it necessary for validation.
+ // A wgpu::TextureFormat along with all the information about it necessary for validation.
struct Format {
enum Aspect {
Color,
@@ -45,7 +45,7 @@ namespace dawn_native {
Other,
};
- dawn::TextureFormat format;
+ wgpu::TextureFormat format;
bool isRenderable;
bool isCompressed;
// A format can be known but not supported because it is part of a disabled extension.
@@ -57,11 +57,14 @@ namespace dawn_native {
uint32_t blockWidth;
uint32_t blockHeight;
+ static Type TextureComponentTypeToFormatType(wgpu::TextureComponentType componentType);
+ static wgpu::TextureComponentType FormatTypeToTextureComponentType(Type type);
+
bool IsColor() const;
bool HasDepth() const;
bool HasStencil() const;
bool HasDepthOrStencil() const;
- bool HasComponentType(dawn::TextureComponentType componentType) const;
+ bool HasComponentType(wgpu::TextureComponentType componentType) const;
// The index of the format in the list of all known formats: a unique number for each format
// in [0, kKnownFormatCount)
@@ -73,7 +76,7 @@ namespace dawn_native {
using FormatTable = std::array<Format, kKnownFormatCount>;
// Returns the index of a format in the FormatTable.
- size_t ComputeFormatIndex(dawn::TextureFormat format);
+ size_t ComputeFormatIndex(wgpu::TextureFormat format);
// Builds the format table with the extensions enabled on the device.
FormatTable BuildFormatTable(const DeviceBase* device);
diff --git a/chromium/third_party/dawn/src/dawn_native/Forward.h b/chromium/third_party/dawn/src/dawn_native/Forward.h
index ad73beffac3..948fbfd8ae7 100644
--- a/chromium/third_party/dawn/src/dawn_native/Forward.h
+++ b/chromium/third_party/dawn/src/dawn_native/Forward.h
@@ -25,16 +25,16 @@ namespace dawn_native {
class BufferBase;
class ComputePipelineBase;
class CommandBufferBase;
- class CommandEncoderBase;
- class ComputePassEncoderBase;
- class FenceBase;
+ class CommandEncoder;
+ class ComputePassEncoder;
+ class Fence;
class InstanceBase;
class PipelineBase;
class PipelineLayoutBase;
class QueueBase;
class RenderBundleBase;
- class RenderBundleEncoderBase;
- class RenderPassEncoderBase;
+ class RenderBundleEncoder;
+ class RenderPassEncoder;
class RenderPipelineBase;
class ResourceHeapBase;
class SamplerBase;
diff --git a/chromium/third_party/dawn/src/dawn_native/ObjectBase.cpp b/chromium/third_party/dawn/src/dawn_native/ObjectBase.cpp
index 3e40af4de69..0f8c1a35257 100644
--- a/chromium/third_party/dawn/src/dawn_native/ObjectBase.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ObjectBase.cpp
@@ -16,10 +16,14 @@
namespace dawn_native {
- ObjectBase::ObjectBase(DeviceBase* device) : mDevice(device), mIsError(false) {
+ static constexpr uint64_t kErrorPayload = 0;
+ static constexpr uint64_t kNotErrorPayload = 1;
+
+ ObjectBase::ObjectBase(DeviceBase* device) : RefCounted(kNotErrorPayload), mDevice(device) {
}
- ObjectBase::ObjectBase(DeviceBase* device, ErrorTag) : mDevice(device), mIsError(true) {
+ ObjectBase::ObjectBase(DeviceBase* device, ErrorTag)
+ : RefCounted(kErrorPayload), mDevice(device) {
}
ObjectBase::~ObjectBase() {
@@ -30,7 +34,7 @@ namespace dawn_native {
}
bool ObjectBase::IsError() const {
- return mIsError;
+ return GetRefCountPayload() == kErrorPayload;
}
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ObjectBase.h b/chromium/third_party/dawn/src/dawn_native/ObjectBase.h
index 02dd7ec6ba1..2a1f86a9a49 100644
--- a/chromium/third_party/dawn/src/dawn_native/ObjectBase.h
+++ b/chromium/third_party/dawn/src/dawn_native/ObjectBase.h
@@ -35,10 +35,6 @@ namespace dawn_native {
private:
DeviceBase* mDevice;
- // TODO(cwallez@chromium.org): This most likely adds 4 bytes to most Dawn objects, see if
- // that bit can be hidden in the refcount once it is a single 64bit refcount.
- // See https://bugs.chromium.org/p/dawn/issues/detail?id=105
- bool mIsError;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h b/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
index eebeda6db1f..c2a20717373 100644
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
+++ b/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
@@ -30,14 +30,16 @@ namespace dawn_native {
// re-compute it.
struct PassResourceUsage {
std::vector<BufferBase*> buffers;
- std::vector<dawn::BufferUsage> bufferUsages;
+ std::vector<wgpu::BufferUsage> bufferUsages;
std::vector<TextureBase*> textures;
- std::vector<dawn::TextureUsage> textureUsages;
+ std::vector<wgpu::TextureUsage> textureUsages;
};
+ using PerPassUsages = std::vector<PassResourceUsage>;
+
struct CommandBufferResourceUsage {
- std::vector<PassResourceUsage> perPass;
+ PerPassUsages perPass;
std::set<BufferBase*> topLevelBuffers;
std::set<TextureBase*> topLevelTextures;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
index 8ae4edfdb5e..4831b369898 100644
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
@@ -19,80 +19,16 @@
namespace dawn_native {
- void PassResourceUsageTracker::BufferUsedAs(BufferBase* buffer, dawn::BufferUsage usage) {
+ void PassResourceUsageTracker::BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage) {
// std::map's operator[] will create the key and return 0 if the key didn't exist
// before.
- dawn::BufferUsage& storedUsage = mBufferUsages[buffer];
-
- if (usage == dawn::BufferUsage::Storage && storedUsage & dawn::BufferUsage::Storage) {
- mStorageUsedMultipleTimes = true;
- }
-
- storedUsage |= usage;
+ mBufferUsages[buffer] |= usage;
}
- void PassResourceUsageTracker::TextureUsedAs(TextureBase* texture, dawn::TextureUsage usage) {
+ void PassResourceUsageTracker::TextureUsedAs(TextureBase* texture, wgpu::TextureUsage usage) {
// std::map's operator[] will create the key and return 0 if the key didn't exist
// before.
- dawn::TextureUsage& storedUsage = mTextureUsages[texture];
-
- if (usage == dawn::TextureUsage::Storage && storedUsage & dawn::TextureUsage::Storage) {
- mStorageUsedMultipleTimes = true;
- }
-
- storedUsage |= usage;
- }
-
- MaybeError PassResourceUsageTracker::ValidateComputePassUsages() const {
- // Storage resources cannot be used twice in the same compute pass
- if (mStorageUsedMultipleTimes) {
- return DAWN_VALIDATION_ERROR("Storage resource used multiple times in compute pass");
- }
- return ValidateUsages();
- }
-
- MaybeError PassResourceUsageTracker::ValidateRenderPassUsages() const {
- return ValidateUsages();
- }
-
- // Performs the per-pass usage validation checks
- MaybeError PassResourceUsageTracker::ValidateUsages() const {
- // Buffers can only be used as single-write or multiple read.
- for (auto& it : mBufferUsages) {
- BufferBase* buffer = it.first;
- dawn::BufferUsage usage = it.second;
-
- if (usage & ~buffer->GetUsage()) {
- return DAWN_VALIDATION_ERROR("Buffer missing usage for the pass");
- }
-
- bool readOnly = (usage & kReadOnlyBufferUsages) == usage;
- bool singleUse = dawn::HasZeroOrOneBits(usage);
-
- if (!readOnly && !singleUse) {
- return DAWN_VALIDATION_ERROR(
- "Buffer used as writable usage and another usage in pass");
- }
- }
-
- // Textures can only be used as single-write or multiple read.
- // TODO(cwallez@chromium.org): implement per-subresource tracking
- for (auto& it : mTextureUsages) {
- TextureBase* texture = it.first;
- dawn::TextureUsage usage = it.second;
-
- if (usage & ~texture->GetUsage()) {
- return DAWN_VALIDATION_ERROR("Texture missing usage for the pass");
- }
-
- // For textures the only read-only usage in a pass is Sampled, so checking the
- // usage constraint simplifies to checking a single usage bit is set.
- if (!dawn::HasZeroOrOneBits(it.second)) {
- return DAWN_VALIDATION_ERROR("Texture used with more than one usage in pass");
- }
- }
-
- return {};
+ mTextureUsages[texture] |= usage;
}
// Returns the per-pass usage for use by backends for APIs with explicit barriers.
@@ -113,6 +49,9 @@ namespace dawn_native {
result.textureUsages.push_back(it.second);
}
+ mBufferUsages.clear();
+ mTextureUsages.clear();
+
return result;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h
index 8f3662f14b0..458b1757070 100644
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.h
@@ -15,7 +15,6 @@
#ifndef DAWNNATIVE_PASSRESOURCEUSAGETRACKER_H_
#define DAWNNATIVE_PASSRESOURCEUSAGETRACKER_H_
-#include "dawn_native/Error.h"
#include "dawn_native/PassResourceUsage.h"
#include "dawn_native/dawn_platform.h"
@@ -33,22 +32,15 @@ namespace dawn_native {
// information.
class PassResourceUsageTracker {
public:
- void BufferUsedAs(BufferBase* buffer, dawn::BufferUsage usage);
- void TextureUsedAs(TextureBase* texture, dawn::TextureUsage usage);
-
- MaybeError ValidateComputePassUsages() const;
- MaybeError ValidateRenderPassUsages() const;
+ void BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage);
+ void TextureUsedAs(TextureBase* texture, wgpu::TextureUsage usage);
// Returns the per-pass usage for use by backends for APIs with explicit barriers.
PassResourceUsage AcquireResourceUsage();
private:
- // Performs the per-pass usage validation checks
- MaybeError ValidateUsages() const;
-
- std::map<BufferBase*, dawn::BufferUsage> mBufferUsages;
- std::map<TextureBase*, dawn::TextureUsage> mTextureUsages;
- bool mStorageUsedMultipleTimes = false;
+ std::map<BufferBase*, wgpu::BufferUsage> mBufferUsages;
+ std::map<TextureBase*, wgpu::TextureUsage> mTextureUsages;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/PerStage.cpp b/chromium/third_party/dawn/src/dawn_native/PerStage.cpp
index c4837c7b5fb..198d99dbdc6 100644
--- a/chromium/third_party/dawn/src/dawn_native/PerStage.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/PerStage.cpp
@@ -16,14 +16,14 @@
namespace dawn_native {
- BitSetIterator<kNumStages, SingleShaderStage> IterateStages(dawn::ShaderStage stages) {
+ BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages) {
std::bitset<kNumStages> bits(static_cast<uint32_t>(stages));
return BitSetIterator<kNumStages, SingleShaderStage>(bits);
}
- dawn::ShaderStage StageBit(SingleShaderStage stage) {
+ wgpu::ShaderStage StageBit(SingleShaderStage stage) {
ASSERT(static_cast<uint32_t>(stage) < kNumStages);
- return static_cast<dawn::ShaderStage>(1 << static_cast<uint32_t>(stage));
+ return static_cast<wgpu::ShaderStage>(1 << static_cast<uint32_t>(stage));
}
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/PerStage.h b/chromium/third_party/dawn/src/dawn_native/PerStage.h
index ac92b5b78db..be9f4c459dd 100644
--- a/chromium/third_party/dawn/src/dawn_native/PerStage.h
+++ b/chromium/third_party/dawn/src/dawn_native/PerStage.h
@@ -31,21 +31,21 @@ namespace dawn_native {
static_assert(static_cast<uint32_t>(SingleShaderStage::Fragment) < kNumStages, "");
static_assert(static_cast<uint32_t>(SingleShaderStage::Compute) < kNumStages, "");
- static_assert(static_cast<uint32_t>(dawn::ShaderStage::Vertex) ==
+ static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Vertex) ==
(1 << static_cast<uint32_t>(SingleShaderStage::Vertex)),
"");
- static_assert(static_cast<uint32_t>(dawn::ShaderStage::Fragment) ==
+ static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Fragment) ==
(1 << static_cast<uint32_t>(SingleShaderStage::Fragment)),
"");
- static_assert(static_cast<uint32_t>(dawn::ShaderStage::Compute) ==
+ static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Compute) ==
(1 << static_cast<uint32_t>(SingleShaderStage::Compute)),
"");
- BitSetIterator<kNumStages, SingleShaderStage> IterateStages(dawn::ShaderStage stages);
- dawn::ShaderStage StageBit(SingleShaderStage stage);
+ BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages);
+ wgpu::ShaderStage StageBit(SingleShaderStage stage);
- static constexpr dawn::ShaderStage kAllStages =
- static_cast<dawn::ShaderStage>((1 << kNumStages) - 1);
+ static constexpr wgpu::ShaderStage kAllStages =
+ static_cast<wgpu::ShaderStage>((1 << kNumStages) - 1);
template <typename T>
class PerStage {
@@ -64,12 +64,12 @@ namespace dawn_native {
return mData[static_cast<uint32_t>(stage)];
}
- T& operator[](dawn::ShaderStage stageBit) {
+ T& operator[](wgpu::ShaderStage stageBit) {
uint32_t bit = static_cast<uint32_t>(stageBit);
DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
return mData[Log2(bit)];
}
- const T& operator[](dawn::ShaderStage stageBit) const {
+ const T& operator[](wgpu::ShaderStage stageBit) const {
uint32_t bit = static_cast<uint32_t>(stageBit);
DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
return mData[Log2(bit)];
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
index a8c6e9b711a..ad819d1f0bf 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/Pipeline.h"
+#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/Device.h"
#include "dawn_native/PipelineLayout.h"
#include "dawn_native/ShaderModule.h"
@@ -32,7 +33,7 @@ namespace dawn_native {
if (descriptor->module->GetExecutionModel() != stage) {
return DAWN_VALIDATION_ERROR("Setting module with wrong stages");
}
- if (!descriptor->module->IsCompatibleWithPipelineLayout(layout)) {
+ if (layout != nullptr && !descriptor->module->IsCompatibleWithPipelineLayout(layout)) {
return DAWN_VALIDATION_ERROR("Stage not compatible with layout");
}
return {};
@@ -42,15 +43,15 @@ namespace dawn_native {
PipelineBase::PipelineBase(DeviceBase* device,
PipelineLayoutBase* layout,
- dawn::ShaderStage stages)
- : ObjectBase(device), mStageMask(stages), mLayout(layout) {
+ wgpu::ShaderStage stages)
+ : CachedObject(device), mStageMask(stages), mLayout(layout) {
}
PipelineBase::PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag) {
+ : CachedObject(device, tag) {
}
- dawn::ShaderStage PipelineBase::GetStageMask() const {
+ wgpu::ShaderStage PipelineBase::GetStageMask() const {
ASSERT(!IsError());
return mStageMask;
}
@@ -65,4 +66,41 @@ namespace dawn_native {
return mLayout.Get();
}
+ MaybeError PipelineBase::ValidateGetBindGroupLayout(uint32_t group) {
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+ DAWN_TRY(GetDevice()->ValidateObject(mLayout.Get()));
+ if (group >= kMaxBindGroups) {
+ return DAWN_VALIDATION_ERROR("Bind group layout index out of bounds");
+ }
+ return {};
+ }
+
+ BindGroupLayoutBase* PipelineBase::GetBindGroupLayout(uint32_t group) {
+ if (GetDevice()->ConsumedError(ValidateGetBindGroupLayout(group))) {
+ return BindGroupLayoutBase::MakeError(GetDevice());
+ }
+
+ if (!mLayout->GetBindGroupLayoutsMask()[group]) {
+ // Get or create an empty bind group layout.
+ // TODO(enga): Consider caching this object on the Device and reusing it.
+ // Today, this can't be done correctly because of the order of Device destruction.
+ // For example, vulkan::~Device will be called before ~DeviceBase. If DeviceBase owns
+ // a Ref<BindGroupLayoutBase>, then the VkDevice will be destroyed before the
+ // VkDescriptorSetLayout.
+ BindGroupLayoutDescriptor desc = {};
+ desc.bindingCount = 0;
+ desc.bindings = nullptr;
+
+ BindGroupLayoutBase* bgl = nullptr;
+ if (GetDevice()->ConsumedError(GetDevice()->GetOrCreateBindGroupLayout(&desc), &bgl)) {
+ return BindGroupLayoutBase::MakeError(GetDevice());
+ }
+ return bgl;
+ }
+
+ BindGroupLayoutBase* bgl = mLayout->GetBindGroupLayout(group);
+ bgl->Reference();
+ return bgl;
+ }
+
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.h b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
index 21599548af1..29c838602ba 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.h
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.h
@@ -15,8 +15,8 @@
#ifndef DAWNNATIVE_PIPELINE_H_
#define DAWNNATIVE_PIPELINE_H_
+#include "dawn_native/CachedObject.h"
#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectBase.h"
#include "dawn_native/PerStage.h"
#include "dawn_native/PipelineLayout.h"
#include "dawn_native/ShaderModule.h"
@@ -33,18 +33,21 @@ namespace dawn_native {
const PipelineLayoutBase* layout,
SingleShaderStage stage);
- class PipelineBase : public ObjectBase {
+ class PipelineBase : public CachedObject {
public:
- dawn::ShaderStage GetStageMask() const;
+ wgpu::ShaderStage GetStageMask() const;
PipelineLayoutBase* GetLayout();
const PipelineLayoutBase* GetLayout() const;
+ BindGroupLayoutBase* GetBindGroupLayout(uint32_t group);
protected:
- PipelineBase(DeviceBase* device, PipelineLayoutBase* layout, dawn::ShaderStage stages);
+ PipelineBase(DeviceBase* device, PipelineLayoutBase* layout, wgpu::ShaderStage stages);
PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
private:
- dawn::ShaderStage mStageMask;
+ MaybeError ValidateGetBindGroupLayout(uint32_t group);
+
+ wgpu::ShaderStage mStageMask;
Ref<PipelineLayoutBase> mLayout;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
index 8c2a4296a26..b76e553377a 100644
--- a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
@@ -19,9 +19,22 @@
#include "common/HashUtils.h"
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/Device.h"
+#include "dawn_native/ShaderModule.h"
namespace dawn_native {
+ namespace {
+
+ bool operator==(const BindGroupLayoutBinding& lhs, const BindGroupLayoutBinding& rhs) {
+ return lhs.binding == rhs.binding && lhs.visibility == rhs.visibility &&
+ lhs.type == rhs.type && lhs.hasDynamicOffset == rhs.hasDynamicOffset &&
+ lhs.multisampled == rhs.multisampled &&
+ lhs.textureDimension == rhs.textureDimension &&
+ lhs.textureComponentType == rhs.textureComponentType;
+ }
+
+ } // anonymous namespace
+
MaybeError ValidatePipelineLayoutDescriptor(DeviceBase* device,
const PipelineLayoutDescriptor* descriptor) {
if (descriptor->nextInChain != nullptr) {
@@ -56,9 +69,8 @@ namespace dawn_native {
// PipelineLayoutBase
PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
- const PipelineLayoutDescriptor* descriptor,
- bool blueprint)
- : ObjectBase(device), mIsBlueprint(blueprint) {
+ const PipelineLayoutDescriptor* descriptor)
+ : CachedObject(device) {
ASSERT(descriptor->bindGroupLayoutCount <= kMaxBindGroups);
for (uint32_t group = 0; group < descriptor->bindGroupLayoutCount; ++group) {
mBindGroupLayouts[group] = descriptor->bindGroupLayouts[group];
@@ -67,12 +79,12 @@ namespace dawn_native {
}
PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag) {
+ : CachedObject(device, tag) {
}
PipelineLayoutBase::~PipelineLayoutBase() {
// Do not uncache the actual cached object if we are a blueprint
- if (!mIsBlueprint && !IsError()) {
+ if (IsCachedReference()) {
GetDevice()->UncachePipelineLayout(this);
}
}
@@ -82,11 +94,126 @@ namespace dawn_native {
return new PipelineLayoutBase(device, ObjectBase::kError);
}
- const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(size_t group) const {
+ // static
+ ResultOrError<PipelineLayoutBase*> PipelineLayoutBase::CreateDefault(
+ DeviceBase* device,
+ const ShaderModuleBase* const* modules,
+ uint32_t count) {
+ ASSERT(count > 0);
+
+ // Data which BindGroupLayoutDescriptor will point to for creation
+ std::array<std::array<BindGroupLayoutBinding, kMaxBindingsPerGroup>, kMaxBindGroups>
+ bindingData = {};
+
+ // Bitsets of used bindings
+ std::array<std::bitset<kMaxBindingsPerGroup>, kMaxBindGroups> usedBindings = {};
+
+ // A flat map of bindings to the index in |bindingData|
+ std::array<std::array<uint32_t, kMaxBindingsPerGroup>, kMaxBindGroups> usedBindingsMap = {};
+
+ // A counter of how many bindings we've populated in |bindingData|
+ std::array<uint32_t, kMaxBindGroups> bindingCounts = {};
+
+ uint32_t bindGroupLayoutCount = 0;
+ for (uint32_t moduleIndex = 0; moduleIndex < count; ++moduleIndex) {
+ const ShaderModuleBase* module = modules[moduleIndex];
+ const ShaderModuleBase::ModuleBindingInfo& info = module->GetBindingInfo();
+
+ for (uint32_t group = 0; group < info.size(); ++group) {
+ for (uint32_t binding = 0; binding < info[group].size(); ++binding) {
+ const ShaderModuleBase::BindingInfo& bindingInfo = info[group][binding];
+ if (!bindingInfo.used) {
+ continue;
+ }
+
+ if (bindingInfo.multisampled) {
+ return DAWN_VALIDATION_ERROR("Multisampled textures not supported (yet)");
+ }
+
+ BindGroupLayoutBinding bindingSlot;
+ bindingSlot.binding = binding;
+ bindingSlot.visibility = wgpu::ShaderStage::Vertex |
+ wgpu::ShaderStage::Fragment |
+ wgpu::ShaderStage::Compute;
+ bindingSlot.type = bindingInfo.type;
+ bindingSlot.hasDynamicOffset = false;
+ bindingSlot.multisampled = bindingInfo.multisampled;
+ bindingSlot.textureDimension = bindingInfo.textureDimension;
+ bindingSlot.textureComponentType =
+ Format::FormatTypeToTextureComponentType(bindingInfo.textureComponentType);
+
+ if (usedBindings[group][binding]) {
+ if (bindingSlot == bindingData[group][usedBindingsMap[group][binding]]) {
+ // Already used and the data is the same. Continue.
+ continue;
+ } else {
+ return DAWN_VALIDATION_ERROR(
+ "Duplicate binding in default pipeline layout initialization not "
+ "compatible with previous declaration");
+ }
+ }
+
+ uint32_t currentBindingCount = bindingCounts[group];
+ bindingData[group][currentBindingCount] = bindingSlot;
+
+ usedBindingsMap[group][binding] = currentBindingCount;
+ usedBindings[group].set(binding);
+
+ bindingCounts[group]++;
+
+ bindGroupLayoutCount = std::max(bindGroupLayoutCount, group + 1);
+ }
+ }
+ }
+
+ std::array<BindGroupLayoutBase*, kMaxBindGroups> bindGroupLayouts = {};
+ for (uint32_t group = 0; group < bindGroupLayoutCount; ++group) {
+ BindGroupLayoutDescriptor desc = {};
+ desc.bindings = bindingData[group].data();
+ desc.bindingCount = bindingCounts[group];
+
+ // We should never produce a bad descriptor.
+ ASSERT(!ValidateBindGroupLayoutDescriptor(device, &desc).IsError());
+ DAWN_TRY_ASSIGN(bindGroupLayouts[group], device->GetOrCreateBindGroupLayout(&desc));
+ }
+
+ PipelineLayoutDescriptor desc = {};
+ desc.bindGroupLayouts = bindGroupLayouts.data();
+ desc.bindGroupLayoutCount = bindGroupLayoutCount;
+ PipelineLayoutBase* pipelineLayout = device->CreatePipelineLayout(&desc);
+ ASSERT(!pipelineLayout->IsError());
+
+ // These bind group layouts are created internally and referenced by the pipeline layout.
+ // Release the external refcount.
+ for (uint32_t group = 0; group < bindGroupLayoutCount; ++group) {
+ if (bindGroupLayouts[group] != nullptr) {
+ bindGroupLayouts[group]->Release();
+ }
+ }
+
+ for (uint32_t moduleIndex = 0; moduleIndex < count; ++moduleIndex) {
+ ASSERT(modules[moduleIndex]->IsCompatibleWithPipelineLayout(pipelineLayout));
+ }
+
+ return pipelineLayout;
+ }
+
+ const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(uint32_t group) const {
+ ASSERT(!IsError());
+ ASSERT(group < kMaxBindGroups);
+ ASSERT(mMask[group]);
+ const BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
+ ASSERT(bgl != nullptr);
+ return bgl;
+ }
+
+ BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(uint32_t group) {
ASSERT(!IsError());
ASSERT(group < kMaxBindGroups);
ASSERT(mMask[group]);
- return mBindGroupLayouts[group].Get();
+ BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
+ ASSERT(bgl != nullptr);
+ return bgl;
}
const std::bitset<kMaxBindGroups> PipelineLayoutBase::GetBindGroupLayoutsMask() const {
diff --git a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h
index f10149805c5..f919effbb5d 100644
--- a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h
+++ b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.h
@@ -16,9 +16,9 @@
#define DAWNNATIVE_PIPELINELAYOUT_H_
#include "common/Constants.h"
+#include "dawn_native/CachedObject.h"
#include "dawn_native/Error.h"
#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectBase.h"
#include "dawn_native/dawn_platform.h"
@@ -32,16 +32,17 @@ namespace dawn_native {
using BindGroupLayoutArray = std::array<Ref<BindGroupLayoutBase>, kMaxBindGroups>;
- class PipelineLayoutBase : public ObjectBase {
+ class PipelineLayoutBase : public CachedObject {
public:
- PipelineLayoutBase(DeviceBase* device,
- const PipelineLayoutDescriptor* descriptor,
- bool blueprint = false);
+ PipelineLayoutBase(DeviceBase* device, const PipelineLayoutDescriptor* descriptor);
~PipelineLayoutBase() override;
static PipelineLayoutBase* MakeError(DeviceBase* device);
+ static ResultOrError<PipelineLayoutBase*>
+ CreateDefault(DeviceBase* device, const ShaderModuleBase* const* modules, uint32_t count);
- const BindGroupLayoutBase* GetBindGroupLayout(size_t group) const;
+ const BindGroupLayoutBase* GetBindGroupLayout(uint32_t group) const;
+ BindGroupLayoutBase* GetBindGroupLayout(uint32_t group);
const std::bitset<kMaxBindGroups> GetBindGroupLayoutsMask() const;
// Utility functions to compute inherited bind groups.
@@ -65,7 +66,6 @@ namespace dawn_native {
BindGroupLayoutArray mBindGroupLayouts;
std::bitset<kMaxBindGroups> mMask;
- bool mIsBlueprint = false;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
index e59eab8ec3e..bedf0c44176 100644
--- a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/ProgrammablePassEncoder.h"
+#include "common/BitSetIterator.h"
#include "dawn_native/BindGroup.h"
#include "dawn_native/Buffer.h"
#include "dawn_native/CommandBuffer.h"
@@ -25,6 +26,46 @@
namespace dawn_native {
+ namespace {
+ void TrackBindGroupResourceUsage(PassResourceUsageTracker* usageTracker,
+ BindGroupBase* group) {
+ const auto& layoutInfo = group->GetLayout()->GetBindingInfo();
+
+ for (uint32_t i : IterateBitSet(layoutInfo.mask)) {
+ wgpu::BindingType type = layoutInfo.types[i];
+
+ switch (type) {
+ case wgpu::BindingType::UniformBuffer: {
+ BufferBase* buffer = group->GetBindingAsBufferBinding(i).buffer;
+ usageTracker->BufferUsedAs(buffer, wgpu::BufferUsage::Uniform);
+ } break;
+
+ case wgpu::BindingType::StorageBuffer: {
+ BufferBase* buffer = group->GetBindingAsBufferBinding(i).buffer;
+ usageTracker->BufferUsedAs(buffer, wgpu::BufferUsage::Storage);
+ } break;
+
+ case wgpu::BindingType::SampledTexture: {
+ TextureBase* texture = group->GetBindingAsTextureView(i)->GetTexture();
+ usageTracker->TextureUsedAs(texture, wgpu::TextureUsage::Sampled);
+ } break;
+
+ case wgpu::BindingType::ReadonlyStorageBuffer: {
+ BufferBase* buffer = group->GetBindingAsBufferBinding(i).buffer;
+ usageTracker->BufferUsedAs(buffer, kReadOnlyStorage);
+ } break;
+
+ case wgpu::BindingType::Sampler:
+ break;
+
+ case wgpu::BindingType::StorageTexture:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ } // namespace
+
ProgrammablePassEncoder::ProgrammablePassEncoder(DeviceBase* device,
EncodingContext* encodingContext)
: ObjectBase(device), mEncodingContext(encodingContext) {
@@ -73,36 +114,38 @@ namespace dawn_native {
void ProgrammablePassEncoder::SetBindGroup(uint32_t groupIndex,
BindGroupBase* group,
uint32_t dynamicOffsetCount,
- const uint64_t* dynamicOffsets) {
+ const uint32_t* dynamicOffsets) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- DAWN_TRY(GetDevice()->ValidateObject(group));
-
- if (groupIndex >= kMaxBindGroups) {
- return DAWN_VALIDATION_ERROR("Setting bind group over the max");
- }
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(group));
- // Dynamic offsets count must match the number required by the layout perfectly.
- const BindGroupLayoutBase* layout = group->GetLayout();
- if (layout->GetDynamicBufferCount() != dynamicOffsetCount) {
- return DAWN_VALIDATION_ERROR("dynamicOffset count mismatch");
- }
+ if (groupIndex >= kMaxBindGroups) {
+ return DAWN_VALIDATION_ERROR("Setting bind group over the max");
+ }
- for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
- if (dynamicOffsets[i] % kMinDynamicBufferOffsetAlignment != 0) {
- return DAWN_VALIDATION_ERROR("Dynamic Buffer Offset need to be aligned");
+ // Dynamic offsets count must match the number required by the layout perfectly.
+ const BindGroupLayoutBase* layout = group->GetLayout();
+ if (layout->GetDynamicBufferCount() != dynamicOffsetCount) {
+ return DAWN_VALIDATION_ERROR("dynamicOffset count mismatch");
}
- BufferBinding bufferBinding = group->GetBindingAsBufferBinding(i);
+ for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
+ if (dynamicOffsets[i] % kMinDynamicBufferOffsetAlignment != 0) {
+ return DAWN_VALIDATION_ERROR("Dynamic Buffer Offset need to be aligned");
+ }
- // During BindGroup creation, validation ensures binding offset + binding size <=
- // buffer size.
- DAWN_ASSERT(bufferBinding.buffer->GetSize() >= bufferBinding.size);
- DAWN_ASSERT(bufferBinding.buffer->GetSize() - bufferBinding.size >=
- bufferBinding.offset);
+ BufferBinding bufferBinding = group->GetBindingAsBufferBinding(i);
- if ((dynamicOffsets[i] >
- bufferBinding.buffer->GetSize() - bufferBinding.offset - bufferBinding.size)) {
- return DAWN_VALIDATION_ERROR("dynamic offset out of bounds");
+ // During BindGroup creation, validation ensures binding offset + binding size
+ // <= buffer size.
+ DAWN_ASSERT(bufferBinding.buffer->GetSize() >= bufferBinding.size);
+ DAWN_ASSERT(bufferBinding.buffer->GetSize() - bufferBinding.size >=
+ bufferBinding.offset);
+
+ if ((dynamicOffsets[i] > bufferBinding.buffer->GetSize() -
+ bufferBinding.offset - bufferBinding.size)) {
+ return DAWN_VALIDATION_ERROR("dynamic offset out of bounds");
+ }
}
}
@@ -111,10 +154,12 @@ namespace dawn_native {
cmd->group = group;
cmd->dynamicOffsetCount = dynamicOffsetCount;
if (dynamicOffsetCount > 0) {
- uint64_t* offsets = allocator->AllocateData<uint64_t>(cmd->dynamicOffsetCount);
- memcpy(offsets, dynamicOffsets, dynamicOffsetCount * sizeof(uint64_t));
+ uint32_t* offsets = allocator->AllocateData<uint32_t>(cmd->dynamicOffsetCount);
+ memcpy(offsets, dynamicOffsets, dynamicOffsetCount * sizeof(uint32_t));
}
+ TrackBindGroupResourceUsage(&mUsageTracker, group);
+
return {};
});
}
diff --git a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h
index fc8f11a9522..17bfeb413b4 100644
--- a/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/ProgrammablePassEncoder.h
@@ -18,6 +18,7 @@
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/Error.h"
#include "dawn_native/ObjectBase.h"
+#include "dawn_native/PassResourceUsageTracker.h"
#include "dawn_native/dawn_platform.h"
@@ -38,7 +39,7 @@ namespace dawn_native {
void SetBindGroup(uint32_t groupIndex,
BindGroupBase* group,
uint32_t dynamicOffsetCount,
- const uint64_t* dynamicOffsets);
+ const uint32_t* dynamicOffsets);
protected:
// Construct an "error" programmable pass encoder.
@@ -47,6 +48,7 @@ namespace dawn_native {
ErrorTag errorTag);
EncodingContext* mEncodingContext = nullptr;
+ PassResourceUsageTracker mUsageTracker;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.cpp b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
index f13079a1372..0fbcdc7d052 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
@@ -22,6 +22,7 @@
#include "dawn_native/Fence.h"
#include "dawn_native/FenceSignalTracker.h"
#include "dawn_native/Texture.h"
+#include "dawn_platform/DawnPlatform.h"
#include "dawn_platform/tracing/TraceEvent.h"
namespace dawn_native {
@@ -33,8 +34,9 @@ namespace dawn_native {
void QueueBase::Submit(uint32_t commandCount, CommandBufferBase* const* commands) {
DeviceBase* device = GetDevice();
- TRACE_EVENT0(device->GetPlatform(), TRACE_DISABLED_BY_DEFAULT("gpu.dawn"), "Queue::Submit");
- if (device->ConsumedError(ValidateSubmit(commandCount, commands))) {
+ TRACE_EVENT0(device->GetPlatform(), General, "Queue::Submit");
+ if (device->IsValidationEnabled() &&
+ device->ConsumedError(ValidateSubmit(commandCount, commands))) {
return;
}
ASSERT(!IsError());
@@ -46,7 +48,7 @@ namespace dawn_native {
device->GetCurrentErrorScope());
}
- void QueueBase::Signal(FenceBase* fence, uint64_t signalValue) {
+ void QueueBase::Signal(Fence* fence, uint64_t signalValue) {
DeviceBase* device = GetDevice();
if (device->ConsumedError(ValidateSignal(fence, signalValue))) {
return;
@@ -59,16 +61,17 @@ namespace dawn_native {
device->GetCurrentErrorScope());
}
- FenceBase* QueueBase::CreateFence(const FenceDescriptor* descriptor) {
+ Fence* QueueBase::CreateFence(const FenceDescriptor* descriptor) {
if (GetDevice()->ConsumedError(ValidateCreateFence(descriptor))) {
- return FenceBase::MakeError(GetDevice());
+ return Fence::MakeError(GetDevice());
}
- return new FenceBase(this, descriptor);
+ return new Fence(this, descriptor);
}
MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
CommandBufferBase* const* commands) {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "Queue::ValidateSubmit");
DAWN_TRY(GetDevice()->ValidateObject(this));
for (uint32_t i = 0; i < commandCount; ++i) {
@@ -96,7 +99,7 @@ namespace dawn_native {
return {};
}
- MaybeError QueueBase::ValidateSignal(const FenceBase* fence, uint64_t signalValue) {
+ MaybeError QueueBase::ValidateSignal(const Fence* fence, uint64_t signalValue) {
DAWN_TRY(GetDevice()->ValidateObject(this));
DAWN_TRY(GetDevice()->ValidateObject(fence));
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.h b/chromium/third_party/dawn/src/dawn_native/Queue.h
index 5d94b3ba9b4..4adec04e11f 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.h
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.h
@@ -29,15 +29,15 @@ namespace dawn_native {
// Dawn API
void Submit(uint32_t commandCount, CommandBufferBase* const* commands);
- void Signal(FenceBase* fence, uint64_t signalValue);
- FenceBase* CreateFence(const FenceDescriptor* descriptor);
+ void Signal(Fence* fence, uint64_t signalValue);
+ Fence* CreateFence(const FenceDescriptor* descriptor);
private:
virtual MaybeError SubmitImpl(uint32_t commandCount,
CommandBufferBase* const* commands) = 0;
MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands);
- MaybeError ValidateSignal(const FenceBase* fence, uint64_t signalValue);
+ MaybeError ValidateSignal(const Fence* fence, uint64_t signalValue);
MaybeError ValidateCreateFence(const FenceDescriptor* descriptor);
};
diff --git a/chromium/third_party/dawn/src/dawn_native/RefCounted.cpp b/chromium/third_party/dawn/src/dawn_native/RefCounted.cpp
index 5ec8050de74..6782c144153 100644
--- a/chromium/third_party/dawn/src/dawn_native/RefCounted.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RefCounted.cpp
@@ -18,26 +18,39 @@
namespace dawn_native {
- RefCounted::RefCounted() {
+ static constexpr size_t kPayloadBits = 1;
+ static constexpr uint64_t kPayloadMask = (uint64_t(1) << kPayloadBits) - 1;
+ static constexpr uint64_t kRefCountIncrement = (uint64_t(1) << kPayloadBits);
+
+ RefCounted::RefCounted(uint64_t payload) : mRefCount(kRefCountIncrement + payload) {
+ ASSERT((payload & kPayloadMask) == payload);
}
RefCounted::~RefCounted() {
}
- uint64_t RefCounted::GetRefCount() const {
- return mRefCount;
+ uint64_t RefCounted::GetRefCountForTesting() const {
+ return mRefCount >> kPayloadBits;
+ }
+
+ uint64_t RefCounted::GetRefCountPayload() const {
+ // We only care about the payload bits of the refcount. These never change after
+ // initialization so we can use the relaxed memory order. The order doesn't guarantee
+ // anything except the atomicity of the load, which is enough since any past values of the
+ // atomic will have the correct payload bits.
+ return kPayloadMask & mRefCount.load(std::memory_order_relaxed);
}
void RefCounted::Reference() {
- ASSERT(mRefCount != 0);
- mRefCount++;
+ ASSERT((mRefCount & ~kPayloadMask) != 0);
+ mRefCount += kRefCountIncrement;
}
void RefCounted::Release() {
- ASSERT(mRefCount != 0);
+ ASSERT((mRefCount & ~kPayloadMask) != 0);
- mRefCount--;
- if (mRefCount == 0) {
+ mRefCount -= kRefCountIncrement;
+ if (mRefCount < kRefCountIncrement) {
delete this;
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/RefCounted.h b/chromium/third_party/dawn/src/dawn_native/RefCounted.h
index 89b0666fedb..7b94d2ec082 100644
--- a/chromium/third_party/dawn/src/dawn_native/RefCounted.h
+++ b/chromium/third_party/dawn/src/dawn_native/RefCounted.h
@@ -22,17 +22,18 @@ namespace dawn_native {
class RefCounted {
public:
- RefCounted();
+ RefCounted(uint64_t payload = 0);
virtual ~RefCounted();
- uint64_t GetRefCount() const;
+ uint64_t GetRefCountForTesting() const;
+ uint64_t GetRefCountPayload() const;
// Dawn API
void Reference();
void Release();
protected:
- std::atomic_uint64_t mRefCount = {1};
+ std::atomic_uint64_t mRefCount;
};
template <typename T>
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp b/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp
index 9cd08ea0e61..b3478867976 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderBundle.cpp
@@ -21,7 +21,7 @@
namespace dawn_native {
- RenderBundleBase::RenderBundleBase(RenderBundleEncoderBase* encoder,
+ RenderBundleBase::RenderBundleBase(RenderBundleEncoder* encoder,
const RenderBundleDescriptor* descriptor,
AttachmentState* attachmentState,
PassResourceUsage resourceUsage)
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundle.h b/chromium/third_party/dawn/src/dawn_native/RenderBundle.h
index 26db850e2a7..828c23be967 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundle.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderBundle.h
@@ -30,11 +30,11 @@ namespace dawn_native {
struct BeginRenderPassCmd;
struct RenderBundleDescriptor;
- class RenderBundleEncoderBase;
+ class RenderBundleEncoder;
class RenderBundleBase : public ObjectBase {
public:
- RenderBundleBase(RenderBundleEncoderBase* encoder,
+ RenderBundleBase(RenderBundleEncoder* encoder,
const RenderBundleDescriptor* descriptor,
AttachmentState* attachmentState,
PassResourceUsage resourceUsage);
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp
index 8a7e99a58dc..956fa8b3a25 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.cpp
@@ -20,11 +20,13 @@
#include "dawn_native/Format.h"
#include "dawn_native/RenderPipeline.h"
#include "dawn_native/ValidationUtils_autogen.h"
+#include "dawn_platform/DawnPlatform.h"
+#include "dawn_platform/tracing/TraceEvent.h"
namespace dawn_native {
MaybeError ValidateColorAttachmentFormat(const DeviceBase* device,
- dawn::TextureFormat textureFormat) {
+ wgpu::TextureFormat textureFormat) {
DAWN_TRY(ValidateTextureFormat(textureFormat));
const Format* format = nullptr;
DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
@@ -36,7 +38,7 @@ namespace dawn_native {
}
MaybeError ValidateDepthStencilAttachmentFormat(const DeviceBase* device,
- dawn::TextureFormat textureFormat) {
+ wgpu::TextureFormat textureFormat) {
DAWN_TRY(ValidateTextureFormat(textureFormat));
const Format* format = nullptr;
DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
@@ -60,7 +62,7 @@ namespace dawn_native {
}
if (descriptor->colorFormatsCount == 0 &&
- descriptor->depthStencilFormat == dawn::TextureFormat::Undefined) {
+ descriptor->depthStencilFormat == wgpu::TextureFormat::Undefined) {
return DAWN_VALIDATION_ERROR("Should have at least one attachment format");
}
@@ -68,58 +70,59 @@ namespace dawn_native {
DAWN_TRY(ValidateColorAttachmentFormat(device, descriptor->colorFormats[i]));
}
- if (descriptor->depthStencilFormat != dawn::TextureFormat::Undefined) {
+ if (descriptor->depthStencilFormat != wgpu::TextureFormat::Undefined) {
DAWN_TRY(ValidateDepthStencilAttachmentFormat(device, descriptor->depthStencilFormat));
}
return {};
}
- RenderBundleEncoderBase::RenderBundleEncoderBase(
- DeviceBase* device,
- const RenderBundleEncoderDescriptor* descriptor)
+ RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device,
+ const RenderBundleEncoderDescriptor* descriptor)
: RenderEncoderBase(device, &mEncodingContext),
mEncodingContext(device, this),
mAttachmentState(device->GetOrCreateAttachmentState(descriptor)) {
}
- RenderBundleEncoderBase::RenderBundleEncoderBase(DeviceBase* device, ErrorTag errorTag)
+ RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag)
: RenderEncoderBase(device, &mEncodingContext, errorTag), mEncodingContext(device, this) {
}
// static
- RenderBundleEncoderBase* RenderBundleEncoderBase::MakeError(DeviceBase* device) {
- return new RenderBundleEncoderBase(device, ObjectBase::kError);
+ RenderBundleEncoder* RenderBundleEncoder::MakeError(DeviceBase* device) {
+ return new RenderBundleEncoder(device, ObjectBase::kError);
}
- const AttachmentState* RenderBundleEncoderBase::GetAttachmentState() const {
+ const AttachmentState* RenderBundleEncoder::GetAttachmentState() const {
return mAttachmentState.Get();
}
- CommandIterator RenderBundleEncoderBase::AcquireCommands() {
+ CommandIterator RenderBundleEncoder::AcquireCommands() {
return mEncodingContext.AcquireCommands();
}
- RenderBundleBase* RenderBundleEncoderBase::Finish(const RenderBundleDescriptor* descriptor) {
- if (GetDevice()->ConsumedError(ValidateFinish(descriptor))) {
- return RenderBundleBase::MakeError(GetDevice());
+ RenderBundleBase* RenderBundleEncoder::Finish(const RenderBundleDescriptor* descriptor) {
+ PassResourceUsage usages = mUsageTracker.AcquireResourceUsage();
+
+ DeviceBase* device = GetDevice();
+ // Even if mEncodingContext.Finish() validation fails, calling it will mutate the internal
+ // state of the encoding context. Subsequent calls to encode commands will generate errors.
+ if (device->ConsumedError(mEncodingContext.Finish()) ||
+ (device->IsValidationEnabled() &&
+ device->ConsumedError(ValidateFinish(mEncodingContext.GetIterator(), usages)))) {
+ return RenderBundleBase::MakeError(device);
}
- ASSERT(!IsError());
- return new RenderBundleBase(this, descriptor, mAttachmentState.Get(),
- std::move(mResourceUsage));
+ ASSERT(!IsError());
+ return new RenderBundleBase(this, descriptor, mAttachmentState.Get(), std::move(usages));
}
- MaybeError RenderBundleEncoderBase::ValidateFinish(const RenderBundleDescriptor* descriptor) {
+ MaybeError RenderBundleEncoder::ValidateFinish(CommandIterator* commands,
+ const PassResourceUsage& usages) const {
+ TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "RenderBundleEncoder::ValidateFinish");
DAWN_TRY(GetDevice()->ValidateObject(this));
-
- // Even if Finish() validation fails, calling it will mutate the internal state of the
- // encoding context. Subsequent calls to encode commands will generate errors.
- DAWN_TRY(mEncodingContext.Finish());
-
- CommandIterator* commands = mEncodingContext.GetIterator();
-
- DAWN_TRY(ValidateRenderBundle(commands, mAttachmentState.Get(), &mResourceUsage));
+ DAWN_TRY(ValidatePassResourceUsage(usages));
+ DAWN_TRY(ValidateRenderBundle(commands, mAttachmentState.Get()));
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h
index aa322011c4a..0581719f1dc 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderBundleEncoder.h
@@ -26,12 +26,12 @@ namespace dawn_native {
MaybeError ValidateRenderBundleEncoderDescriptor(
const DeviceBase* device,
const RenderBundleEncoderDescriptor* descriptor);
- class RenderBundleEncoderBase : public RenderEncoderBase {
+
+ class RenderBundleEncoder final : public RenderEncoderBase {
public:
- RenderBundleEncoderBase(DeviceBase* device,
- const RenderBundleEncoderDescriptor* descriptor);
+ RenderBundleEncoder(DeviceBase* device, const RenderBundleEncoderDescriptor* descriptor);
- static RenderBundleEncoderBase* MakeError(DeviceBase* device);
+ static RenderBundleEncoder* MakeError(DeviceBase* device);
const AttachmentState* GetAttachmentState() const;
@@ -40,13 +40,12 @@ namespace dawn_native {
CommandIterator AcquireCommands();
private:
- RenderBundleEncoderBase(DeviceBase* device, ErrorTag errorTag);
+ RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag);
- MaybeError ValidateFinish(const RenderBundleDescriptor* descriptor);
+ MaybeError ValidateFinish(CommandIterator* commands, const PassResourceUsage& usages) const;
EncodingContext mEncodingContext;
Ref<AttachmentState> mAttachmentState;
- PassResourceUsage mResourceUsage;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
index aecaf30afa1..885f7a11803 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderEncoderBase.cpp
@@ -81,6 +81,8 @@ namespace dawn_native {
cmd->indirectBuffer = indirectBuffer;
cmd->indirectOffset = indirectOffset;
+ mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+
return {};
});
}
@@ -100,6 +102,8 @@ namespace dawn_native {
cmd->indirectBuffer = indirectBuffer;
cmd->indirectOffset = indirectOffset;
+ mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+
return {};
});
}
@@ -125,6 +129,8 @@ namespace dawn_native {
cmd->buffer = buffer;
cmd->offset = offset;
+ mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Index);
+
return {};
});
}
@@ -139,6 +145,8 @@ namespace dawn_native {
cmd->buffer = buffer;
cmd->offset = offset;
+ mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Vertex);
+
return {};
});
}
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
index 27f5df33260..9dbb2ee87c1 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
@@ -27,37 +27,41 @@
namespace dawn_native {
- RenderPassEncoderBase::RenderPassEncoderBase(DeviceBase* device,
- CommandEncoderBase* commandEncoder,
- EncodingContext* encodingContext)
+ // The usage tracker is passed in here, because it is prepopulated with usages from the
+ // BeginRenderPassCmd. If we had RenderPassEncoder responsible for recording the
+ // command, then this wouldn't be necessary.
+ RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ PassResourceUsageTracker usageTracker)
: RenderEncoderBase(device, encodingContext), mCommandEncoder(commandEncoder) {
+ mUsageTracker = std::move(usageTracker);
}
- RenderPassEncoderBase::RenderPassEncoderBase(DeviceBase* device,
- CommandEncoderBase* commandEncoder,
- EncodingContext* encodingContext,
- ErrorTag errorTag)
+ RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag)
: RenderEncoderBase(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {
}
- RenderPassEncoderBase* RenderPassEncoderBase::MakeError(DeviceBase* device,
- CommandEncoderBase* commandEncoder,
- EncodingContext* encodingContext) {
- return new RenderPassEncoderBase(device, commandEncoder, encodingContext,
- ObjectBase::kError);
+ RenderPassEncoder* RenderPassEncoder::MakeError(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext) {
+ return new RenderPassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError);
}
- void RenderPassEncoderBase::EndPass() {
+ void RenderPassEncoder::EndPass() {
if (mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
allocator->Allocate<EndRenderPassCmd>(Command::EndRenderPass);
return {};
})) {
- mEncodingContext->ExitPass(this);
+ mEncodingContext->ExitPass(this, mUsageTracker.AcquireResourceUsage());
}
}
- void RenderPassEncoderBase::SetStencilReference(uint32_t reference) {
+ void RenderPassEncoder::SetStencilReference(uint32_t reference) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
SetStencilReferenceCmd* cmd =
allocator->Allocate<SetStencilReferenceCmd>(Command::SetStencilReference);
@@ -67,7 +71,7 @@ namespace dawn_native {
});
}
- void RenderPassEncoderBase::SetBlendColor(const Color* color) {
+ void RenderPassEncoder::SetBlendColor(const Color* color) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
SetBlendColorCmd* cmd = allocator->Allocate<SetBlendColorCmd>(Command::SetBlendColor);
cmd->color = *color;
@@ -76,12 +80,12 @@ namespace dawn_native {
});
}
- void RenderPassEncoderBase::SetViewport(float x,
- float y,
- float width,
- float height,
- float minDepth,
- float maxDepth) {
+ void RenderPassEncoder::SetViewport(float x,
+ float y,
+ float width,
+ float height,
+ float minDepth,
+ float maxDepth) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if ((isnan(x) || isnan(y) || isnan(width) || isnan(height) || isnan(minDepth) ||
isnan(maxDepth))) {
@@ -111,10 +115,10 @@ namespace dawn_native {
});
}
- void RenderPassEncoderBase::SetScissorRect(uint32_t x,
- uint32_t y,
- uint32_t width,
- uint32_t height) {
+ void RenderPassEncoder::SetScissorRect(uint32_t x,
+ uint32_t y,
+ uint32_t width,
+ uint32_t height) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
if (width == 0 || height == 0) {
return DAWN_VALIDATION_ERROR("Width and height must be greater than 0.");
@@ -131,8 +135,7 @@ namespace dawn_native {
});
}
- void RenderPassEncoderBase::ExecuteBundles(uint32_t count,
- RenderBundleBase* const* renderBundles) {
+ void RenderPassEncoder::ExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles) {
mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
for (uint32_t i = 0; i < count; ++i) {
DAWN_TRY(GetDevice()->ValidateObject(renderBundles[i]));
@@ -145,6 +148,14 @@ namespace dawn_native {
Ref<RenderBundleBase>* bundles = allocator->AllocateData<Ref<RenderBundleBase>>(count);
for (uint32_t i = 0; i < count; ++i) {
bundles[i] = renderBundles[i];
+
+ const PassResourceUsage& usages = bundles[i]->GetResourceUsage();
+ for (uint32_t i = 0; i < usages.buffers.size(); ++i) {
+ mUsageTracker.BufferUsedAs(usages.buffers[i], usages.bufferUsages[i]);
+ }
+ for (uint32_t i = 0; i < usages.textures.size(); ++i) {
+ mUsageTracker.TextureUsedAs(usages.textures[i], usages.textureUsages[i]);
+ }
}
return {};
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
index 4b7c06db76f..cd9ac017fb7 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
@@ -22,19 +22,16 @@ namespace dawn_native {
class RenderBundleBase;
- // This is called RenderPassEncoderBase to match the code generator expectations. Note that it
- // is a pure frontend type to record in its parent CommandEncoder and never has a backend
- // implementation.
- // TODO(cwallez@chromium.org): Remove that generator limitation and rename to RenderPassEncoder
- class RenderPassEncoderBase : public RenderEncoderBase {
+ class RenderPassEncoder final : public RenderEncoderBase {
public:
- RenderPassEncoderBase(DeviceBase* device,
- CommandEncoderBase* commandEncoder,
- EncodingContext* encodingContext);
+ RenderPassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ PassResourceUsageTracker usageTracker);
- static RenderPassEncoderBase* MakeError(DeviceBase* device,
- CommandEncoderBase* commandEncoder,
- EncodingContext* encodingContext);
+ static RenderPassEncoder* MakeError(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext);
void EndPass();
@@ -50,15 +47,15 @@ namespace dawn_native {
void ExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles);
protected:
- RenderPassEncoderBase(DeviceBase* device,
- CommandEncoderBase* commandEncoder,
- EncodingContext* encodingContext,
- ErrorTag errorTag);
+ RenderPassEncoder(DeviceBase* device,
+ CommandEncoder* commandEncoder,
+ EncodingContext* encodingContext,
+ ErrorTag errorTag);
private:
// For render and compute passes, the encoding context is borrowed from the command encoder.
// Keep a reference to the encoder to make sure the context isn't freed.
- Ref<CommandEncoderBase> mCommandEncoder;
+ Ref<CommandEncoder> mCommandEncoder;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
index daba3a8ff2c..be870403875 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
@@ -61,44 +61,44 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateVertexBufferDescriptor(
- const VertexBufferDescriptor* buffer,
+ MaybeError ValidateVertexBufferLayoutDescriptor(
+ const VertexBufferLayoutDescriptor* buffer,
std::bitset<kMaxVertexAttributes>* attributesSetMask) {
DAWN_TRY(ValidateInputStepMode(buffer->stepMode));
- if (buffer->stride > kMaxVertexBufferStride) {
- return DAWN_VALIDATION_ERROR("Setting input stride out of bounds");
+ if (buffer->arrayStride > kMaxVertexBufferStride) {
+ return DAWN_VALIDATION_ERROR("Setting arrayStride out of bounds");
}
- if (buffer->stride % 4 != 0) {
+ if (buffer->arrayStride % 4 != 0) {
return DAWN_VALIDATION_ERROR(
- "Stride of Vertex buffer needs to be a multiple of 4 bytes");
+ "arrayStride of Vertex buffer needs to be a multiple of 4 bytes");
}
for (uint32_t i = 0; i < buffer->attributeCount; ++i) {
- DAWN_TRY(ValidateVertexAttributeDescriptor(&buffer->attributes[i], buffer->stride,
- attributesSetMask));
+ DAWN_TRY(ValidateVertexAttributeDescriptor(&buffer->attributes[i],
+ buffer->arrayStride, attributesSetMask));
}
return {};
}
- MaybeError ValidateVertexInputDescriptor(
- const VertexInputDescriptor* descriptor,
+ MaybeError ValidateVertexStateDescriptor(
+ const VertexStateDescriptor* descriptor,
std::bitset<kMaxVertexAttributes>* attributesSetMask) {
if (descriptor->nextInChain != nullptr) {
return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
}
DAWN_TRY(ValidateIndexFormat(descriptor->indexFormat));
- if (descriptor->bufferCount > kMaxVertexBuffers) {
- return DAWN_VALIDATION_ERROR("Vertex Inputs number exceeds maximum");
+ if (descriptor->vertexBufferCount > kMaxVertexBuffers) {
+ return DAWN_VALIDATION_ERROR("Vertex buffer count exceeds maximum");
}
uint32_t totalAttributesNum = 0;
- for (uint32_t i = 0; i < descriptor->bufferCount; ++i) {
- DAWN_TRY(
- ValidateVertexBufferDescriptor(&descriptor->buffers[i], attributesSetMask));
- totalAttributesNum += descriptor->buffers[i].attributeCount;
+ for (uint32_t i = 0; i < descriptor->vertexBufferCount; ++i) {
+ DAWN_TRY(ValidateVertexBufferLayoutDescriptor(&descriptor->vertexBuffers[i],
+ attributesSetMask));
+ totalAttributesNum += descriptor->vertexBuffers[i].attributeCount;
}
// Every vertex attribute has a member called shaderLocation, and there are some
@@ -177,100 +177,100 @@ namespace dawn_native {
} // anonymous namespace
// Helper functions
- size_t IndexFormatSize(dawn::IndexFormat format) {
+ size_t IndexFormatSize(wgpu::IndexFormat format) {
switch (format) {
- case dawn::IndexFormat::Uint16:
+ case wgpu::IndexFormat::Uint16:
return sizeof(uint16_t);
- case dawn::IndexFormat::Uint32:
+ case wgpu::IndexFormat::Uint32:
return sizeof(uint32_t);
default:
UNREACHABLE();
}
}
- uint32_t VertexFormatNumComponents(dawn::VertexFormat format) {
+ uint32_t VertexFormatNumComponents(wgpu::VertexFormat format) {
switch (format) {
- case dawn::VertexFormat::UChar4:
- case dawn::VertexFormat::Char4:
- case dawn::VertexFormat::UChar4Norm:
- case dawn::VertexFormat::Char4Norm:
- case dawn::VertexFormat::UShort4:
- case dawn::VertexFormat::Short4:
- case dawn::VertexFormat::UShort4Norm:
- case dawn::VertexFormat::Short4Norm:
- case dawn::VertexFormat::Half4:
- case dawn::VertexFormat::Float4:
- case dawn::VertexFormat::UInt4:
- case dawn::VertexFormat::Int4:
+ case wgpu::VertexFormat::UChar4:
+ case wgpu::VertexFormat::Char4:
+ case wgpu::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::UShort4:
+ case wgpu::VertexFormat::Short4:
+ case wgpu::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Half4:
+ case wgpu::VertexFormat::Float4:
+ case wgpu::VertexFormat::UInt4:
+ case wgpu::VertexFormat::Int4:
return 4;
- case dawn::VertexFormat::Float3:
- case dawn::VertexFormat::UInt3:
- case dawn::VertexFormat::Int3:
+ case wgpu::VertexFormat::Float3:
+ case wgpu::VertexFormat::UInt3:
+ case wgpu::VertexFormat::Int3:
return 3;
- case dawn::VertexFormat::UChar2:
- case dawn::VertexFormat::Char2:
- case dawn::VertexFormat::UChar2Norm:
- case dawn::VertexFormat::Char2Norm:
- case dawn::VertexFormat::UShort2:
- case dawn::VertexFormat::Short2:
- case dawn::VertexFormat::UShort2Norm:
- case dawn::VertexFormat::Short2Norm:
- case dawn::VertexFormat::Half2:
- case dawn::VertexFormat::Float2:
- case dawn::VertexFormat::UInt2:
- case dawn::VertexFormat::Int2:
+ case wgpu::VertexFormat::UChar2:
+ case wgpu::VertexFormat::Char2:
+ case wgpu::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::UShort2:
+ case wgpu::VertexFormat::Short2:
+ case wgpu::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Half2:
+ case wgpu::VertexFormat::Float2:
+ case wgpu::VertexFormat::UInt2:
+ case wgpu::VertexFormat::Int2:
return 2;
- case dawn::VertexFormat::Float:
- case dawn::VertexFormat::UInt:
- case dawn::VertexFormat::Int:
+ case wgpu::VertexFormat::Float:
+ case wgpu::VertexFormat::UInt:
+ case wgpu::VertexFormat::Int:
return 1;
default:
UNREACHABLE();
}
}
- size_t VertexFormatComponentSize(dawn::VertexFormat format) {
+ size_t VertexFormatComponentSize(wgpu::VertexFormat format) {
switch (format) {
- case dawn::VertexFormat::UChar2:
- case dawn::VertexFormat::UChar4:
- case dawn::VertexFormat::Char2:
- case dawn::VertexFormat::Char4:
- case dawn::VertexFormat::UChar2Norm:
- case dawn::VertexFormat::UChar4Norm:
- case dawn::VertexFormat::Char2Norm:
- case dawn::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::UChar2:
+ case wgpu::VertexFormat::UChar4:
+ case wgpu::VertexFormat::Char2:
+ case wgpu::VertexFormat::Char4:
+ case wgpu::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::Char4Norm:
return sizeof(char);
- case dawn::VertexFormat::UShort2:
- case dawn::VertexFormat::UShort4:
- case dawn::VertexFormat::UShort2Norm:
- case dawn::VertexFormat::UShort4Norm:
- case dawn::VertexFormat::Short2:
- case dawn::VertexFormat::Short4:
- case dawn::VertexFormat::Short2Norm:
- case dawn::VertexFormat::Short4Norm:
- case dawn::VertexFormat::Half2:
- case dawn::VertexFormat::Half4:
+ case wgpu::VertexFormat::UShort2:
+ case wgpu::VertexFormat::UShort4:
+ case wgpu::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::Short2:
+ case wgpu::VertexFormat::Short4:
+ case wgpu::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Half2:
+ case wgpu::VertexFormat::Half4:
return sizeof(uint16_t);
- case dawn::VertexFormat::Float:
- case dawn::VertexFormat::Float2:
- case dawn::VertexFormat::Float3:
- case dawn::VertexFormat::Float4:
+ case wgpu::VertexFormat::Float:
+ case wgpu::VertexFormat::Float2:
+ case wgpu::VertexFormat::Float3:
+ case wgpu::VertexFormat::Float4:
return sizeof(float);
- case dawn::VertexFormat::UInt:
- case dawn::VertexFormat::UInt2:
- case dawn::VertexFormat::UInt3:
- case dawn::VertexFormat::UInt4:
- case dawn::VertexFormat::Int:
- case dawn::VertexFormat::Int2:
- case dawn::VertexFormat::Int3:
- case dawn::VertexFormat::Int4:
+ case wgpu::VertexFormat::UInt:
+ case wgpu::VertexFormat::UInt2:
+ case wgpu::VertexFormat::UInt3:
+ case wgpu::VertexFormat::UInt4:
+ case wgpu::VertexFormat::Int:
+ case wgpu::VertexFormat::Int2:
+ case wgpu::VertexFormat::Int3:
+ case wgpu::VertexFormat::Int4:
return sizeof(int32_t);
default:
UNREACHABLE();
}
}
- size_t VertexFormatSize(dawn::VertexFormat format) {
+ size_t VertexFormatSize(wgpu::VertexFormat format) {
return VertexFormatNumComponents(format) * VertexFormatComponentSize(format);
}
@@ -280,7 +280,9 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
}
- DAWN_TRY(device->ValidateObject(descriptor->layout));
+ if (descriptor->layout != nullptr) {
+ DAWN_TRY(device->ValidateObject(descriptor->layout));
+ }
// TODO(crbug.com/dawn/136): Support vertex-only pipelines.
if (descriptor->fragmentStage == nullptr) {
@@ -288,8 +290,8 @@ namespace dawn_native {
}
std::bitset<kMaxVertexAttributes> attributesSetMask;
- if (descriptor->vertexInput) {
- DAWN_TRY(ValidateVertexInputDescriptor(descriptor->vertexInput, &attributesSetMask));
+ if (descriptor->vertexState) {
+ DAWN_TRY(ValidateVertexStateDescriptor(descriptor->vertexState, &attributesSetMask));
}
DAWN_TRY(ValidatePrimitiveTopology(descriptor->primitiveTopology));
@@ -305,7 +307,7 @@ namespace dawn_native {
if ((descriptor->vertexStage.module->GetUsedVertexAttributes() & ~attributesSetMask)
.any()) {
return DAWN_VALIDATION_ERROR(
- "Pipeline vertex stage uses inputs not in the input state");
+ "Pipeline vertex stage uses vertex buffers not in the vertex state");
}
if (!IsValidSampleCount(descriptor->sampleCount)) {
@@ -344,33 +346,32 @@ namespace dawn_native {
}
bool StencilTestEnabled(const DepthStencilStateDescriptor* mDepthStencilState) {
- return mDepthStencilState->stencilBack.compare != dawn::CompareFunction::Always ||
- mDepthStencilState->stencilBack.failOp != dawn::StencilOperation::Keep ||
- mDepthStencilState->stencilBack.depthFailOp != dawn::StencilOperation::Keep ||
- mDepthStencilState->stencilBack.passOp != dawn::StencilOperation::Keep ||
- mDepthStencilState->stencilFront.compare != dawn::CompareFunction::Always ||
- mDepthStencilState->stencilFront.failOp != dawn::StencilOperation::Keep ||
- mDepthStencilState->stencilFront.depthFailOp != dawn::StencilOperation::Keep ||
- mDepthStencilState->stencilFront.passOp != dawn::StencilOperation::Keep;
+ return mDepthStencilState->stencilBack.compare != wgpu::CompareFunction::Always ||
+ mDepthStencilState->stencilBack.failOp != wgpu::StencilOperation::Keep ||
+ mDepthStencilState->stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
+ mDepthStencilState->stencilBack.passOp != wgpu::StencilOperation::Keep ||
+ mDepthStencilState->stencilFront.compare != wgpu::CompareFunction::Always ||
+ mDepthStencilState->stencilFront.failOp != wgpu::StencilOperation::Keep ||
+ mDepthStencilState->stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
+ mDepthStencilState->stencilFront.passOp != wgpu::StencilOperation::Keep;
}
bool BlendEnabled(const ColorStateDescriptor* mColorState) {
- return mColorState->alphaBlend.operation != dawn::BlendOperation::Add ||
- mColorState->alphaBlend.srcFactor != dawn::BlendFactor::One ||
- mColorState->alphaBlend.dstFactor != dawn::BlendFactor::Zero ||
- mColorState->colorBlend.operation != dawn::BlendOperation::Add ||
- mColorState->colorBlend.srcFactor != dawn::BlendFactor::One ||
- mColorState->colorBlend.dstFactor != dawn::BlendFactor::Zero;
+ return mColorState->alphaBlend.operation != wgpu::BlendOperation::Add ||
+ mColorState->alphaBlend.srcFactor != wgpu::BlendFactor::One ||
+ mColorState->alphaBlend.dstFactor != wgpu::BlendFactor::Zero ||
+ mColorState->colorBlend.operation != wgpu::BlendOperation::Add ||
+ mColorState->colorBlend.srcFactor != wgpu::BlendFactor::One ||
+ mColorState->colorBlend.dstFactor != wgpu::BlendFactor::Zero;
}
// RenderPipelineBase
RenderPipelineBase::RenderPipelineBase(DeviceBase* device,
- const RenderPipelineDescriptor* descriptor,
- bool blueprint)
+ const RenderPipelineDescriptor* descriptor)
: PipelineBase(device,
descriptor->layout,
- dawn::ShaderStage::Vertex | dawn::ShaderStage::Fragment),
+ wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment),
mAttachmentState(device->GetOrCreateAttachmentState(descriptor)),
mPrimitiveTopology(descriptor->primitiveTopology),
mSampleMask(descriptor->sampleMask),
@@ -378,31 +379,32 @@ namespace dawn_native {
mVertexModule(descriptor->vertexStage.module),
mVertexEntryPoint(descriptor->vertexStage.entryPoint),
mFragmentModule(descriptor->fragmentStage->module),
- mFragmentEntryPoint(descriptor->fragmentStage->entryPoint),
- mIsBlueprint(blueprint) {
- if (descriptor->vertexInput != nullptr) {
- mVertexInput = *descriptor->vertexInput;
+ mFragmentEntryPoint(descriptor->fragmentStage->entryPoint) {
+ if (descriptor->vertexState != nullptr) {
+ mVertexState = *descriptor->vertexState;
} else {
- mVertexInput = VertexInputDescriptor();
+ mVertexState = VertexStateDescriptor();
}
- for (uint32_t slot = 0; slot < mVertexInput.bufferCount; ++slot) {
- if (mVertexInput.buffers[slot].attributeCount == 0) {
+ for (uint32_t slot = 0; slot < mVertexState.vertexBufferCount; ++slot) {
+ if (mVertexState.vertexBuffers[slot].attributeCount == 0) {
continue;
}
- mInputsSetMask.set(slot);
- mInputInfos[slot].stride = mVertexInput.buffers[slot].stride;
- mInputInfos[slot].stepMode = mVertexInput.buffers[slot].stepMode;
+ mVertexBufferSlotsUsed.set(slot);
+ mVertexBufferInfos[slot].arrayStride = mVertexState.vertexBuffers[slot].arrayStride;
+ mVertexBufferInfos[slot].stepMode = mVertexState.vertexBuffers[slot].stepMode;
uint32_t location = 0;
- for (uint32_t i = 0; i < mVertexInput.buffers[slot].attributeCount; ++i) {
- location = mVertexInput.buffers[slot].attributes[i].shaderLocation;
- mAttributesSetMask.set(location);
+ for (uint32_t i = 0; i < mVertexState.vertexBuffers[slot].attributeCount; ++i) {
+ location = mVertexState.vertexBuffers[slot].attributes[i].shaderLocation;
+ mAttributeLocationsUsed.set(location);
mAttributeInfos[location].shaderLocation = location;
- mAttributeInfos[location].inputSlot = slot;
- mAttributeInfos[location].offset = mVertexInput.buffers[slot].attributes[i].offset;
- mAttributeInfos[location].format = mVertexInput.buffers[slot].attributes[i].format;
+ mAttributeInfos[location].vertexBufferSlot = slot;
+ mAttributeInfos[location].offset =
+ mVertexState.vertexBuffers[slot].attributes[i].offset;
+ mAttributeInfos[location].format =
+ mVertexState.vertexBuffers[slot].attributes[i].format;
}
}
@@ -419,16 +421,16 @@ namespace dawn_native {
// The values indicate that depth and stencil test are disabled when backends
// set their own depth stencil states/descriptors according to the values in
// mDepthStencilState.
- mDepthStencilState.depthCompare = dawn::CompareFunction::Always;
+ mDepthStencilState.depthCompare = wgpu::CompareFunction::Always;
mDepthStencilState.depthWriteEnabled = false;
- mDepthStencilState.stencilBack.compare = dawn::CompareFunction::Always;
- mDepthStencilState.stencilBack.failOp = dawn::StencilOperation::Keep;
- mDepthStencilState.stencilBack.depthFailOp = dawn::StencilOperation::Keep;
- mDepthStencilState.stencilBack.passOp = dawn::StencilOperation::Keep;
- mDepthStencilState.stencilFront.compare = dawn::CompareFunction::Always;
- mDepthStencilState.stencilFront.failOp = dawn::StencilOperation::Keep;
- mDepthStencilState.stencilFront.depthFailOp = dawn::StencilOperation::Keep;
- mDepthStencilState.stencilFront.passOp = dawn::StencilOperation::Keep;
+ mDepthStencilState.stencilBack.compare = wgpu::CompareFunction::Always;
+ mDepthStencilState.stencilBack.failOp = wgpu::StencilOperation::Keep;
+ mDepthStencilState.stencilBack.depthFailOp = wgpu::StencilOperation::Keep;
+ mDepthStencilState.stencilBack.passOp = wgpu::StencilOperation::Keep;
+ mDepthStencilState.stencilFront.compare = wgpu::CompareFunction::Always;
+ mDepthStencilState.stencilFront.failOp = wgpu::StencilOperation::Keep;
+ mDepthStencilState.stencilFront.depthFailOp = wgpu::StencilOperation::Keep;
+ mDepthStencilState.stencilFront.passOp = wgpu::StencilOperation::Keep;
mDepthStencilState.stencilReadMask = 0xff;
mDepthStencilState.stencilWriteMask = 0xff;
}
@@ -451,37 +453,36 @@ namespace dawn_native {
}
RenderPipelineBase::~RenderPipelineBase() {
- // Do not uncache the actual cached object if we are a blueprint
- if (!mIsBlueprint && !IsError()) {
+ if (IsCachedReference()) {
GetDevice()->UncacheRenderPipeline(this);
}
}
- const VertexInputDescriptor* RenderPipelineBase::GetVertexInputDescriptor() const {
+ const VertexStateDescriptor* RenderPipelineBase::GetVertexStateDescriptor() const {
ASSERT(!IsError());
- return &mVertexInput;
+ return &mVertexState;
}
- const std::bitset<kMaxVertexAttributes>& RenderPipelineBase::GetAttributesSetMask() const {
+ const std::bitset<kMaxVertexAttributes>& RenderPipelineBase::GetAttributeLocationsUsed() const {
ASSERT(!IsError());
- return mAttributesSetMask;
+ return mAttributeLocationsUsed;
}
const VertexAttributeInfo& RenderPipelineBase::GetAttribute(uint32_t location) const {
ASSERT(!IsError());
- ASSERT(mAttributesSetMask[location]);
+ ASSERT(mAttributeLocationsUsed[location]);
return mAttributeInfos[location];
}
- const std::bitset<kMaxVertexBuffers>& RenderPipelineBase::GetInputsSetMask() const {
+ const std::bitset<kMaxVertexBuffers>& RenderPipelineBase::GetVertexBufferSlotsUsed() const {
ASSERT(!IsError());
- return mInputsSetMask;
+ return mVertexBufferSlotsUsed;
}
- const VertexBufferInfo& RenderPipelineBase::GetInput(uint32_t slot) const {
+ const VertexBufferInfo& RenderPipelineBase::GetVertexBuffer(uint32_t slot) const {
ASSERT(!IsError());
- ASSERT(mInputsSetMask[slot]);
- return mInputInfos[slot];
+ ASSERT(mVertexBufferSlotsUsed[slot]);
+ return mVertexBufferInfos[slot];
}
const ColorStateDescriptor* RenderPipelineBase::GetColorStateDescriptor(
@@ -496,17 +497,17 @@ namespace dawn_native {
return &mDepthStencilState;
}
- dawn::PrimitiveTopology RenderPipelineBase::GetPrimitiveTopology() const {
+ wgpu::PrimitiveTopology RenderPipelineBase::GetPrimitiveTopology() const {
ASSERT(!IsError());
return mPrimitiveTopology;
}
- dawn::CullMode RenderPipelineBase::GetCullMode() const {
+ wgpu::CullMode RenderPipelineBase::GetCullMode() const {
ASSERT(!IsError());
return mRasterizationState.cullMode;
}
- dawn::FrontFace RenderPipelineBase::GetFrontFace() const {
+ wgpu::FrontFace RenderPipelineBase::GetFrontFace() const {
ASSERT(!IsError());
return mRasterizationState.frontFace;
}
@@ -521,12 +522,12 @@ namespace dawn_native {
return mAttachmentState->HasDepthStencilAttachment();
}
- dawn::TextureFormat RenderPipelineBase::GetColorAttachmentFormat(uint32_t attachment) const {
+ wgpu::TextureFormat RenderPipelineBase::GetColorAttachmentFormat(uint32_t attachment) const {
ASSERT(!IsError());
return mColorStates[attachment].format;
}
- dawn::TextureFormat RenderPipelineBase::GetDepthStencilFormat() const {
+ wgpu::TextureFormat RenderPipelineBase::GetDepthStencilFormat() const {
ASSERT(!IsError());
ASSERT(mAttachmentState->HasDepthStencilAttachment());
return mDepthStencilState.format;
@@ -543,10 +544,10 @@ namespace dawn_native {
return mAttachmentState.Get();
}
- std::bitset<kMaxVertexAttributes> RenderPipelineBase::GetAttributesUsingInput(
+ std::bitset<kMaxVertexAttributes> RenderPipelineBase::GetAttributesUsingVertexBuffer(
uint32_t slot) const {
ASSERT(!IsError());
- return attributesUsingInput[slot];
+ return attributesUsingVertexBuffer[slot];
}
size_t RenderPipelineBase::HashFunc::operator()(const RenderPipelineBase* pipeline) const {
@@ -581,20 +582,21 @@ namespace dawn_native {
desc.stencilBack.depthFailOp, desc.stencilBack.passOp);
}
- // Hash vertex input state
- HashCombine(&hash, pipeline->mAttributesSetMask);
- for (uint32_t i : IterateBitSet(pipeline->mAttributesSetMask)) {
+ // Hash vertex state
+ HashCombine(&hash, pipeline->mAttributeLocationsUsed);
+ for (uint32_t i : IterateBitSet(pipeline->mAttributeLocationsUsed)) {
const VertexAttributeInfo& desc = pipeline->GetAttribute(i);
- HashCombine(&hash, desc.shaderLocation, desc.inputSlot, desc.offset, desc.format);
+ HashCombine(&hash, desc.shaderLocation, desc.vertexBufferSlot, desc.offset,
+ desc.format);
}
- HashCombine(&hash, pipeline->mInputsSetMask);
- for (uint32_t i : IterateBitSet(pipeline->mInputsSetMask)) {
- const VertexBufferInfo& desc = pipeline->GetInput(i);
- HashCombine(&hash, desc.stride, desc.stepMode);
+ HashCombine(&hash, pipeline->mVertexBufferSlotsUsed);
+ for (uint32_t i : IterateBitSet(pipeline->mVertexBufferSlotsUsed)) {
+ const VertexBufferInfo& desc = pipeline->GetVertexBuffer(i);
+ HashCombine(&hash, desc.arrayStride, desc.stepMode);
}
- HashCombine(&hash, pipeline->mVertexInput.indexFormat);
+ HashCombine(&hash, pipeline->mVertexState.indexFormat);
// Hash rasterization state
{
@@ -669,34 +671,34 @@ namespace dawn_native {
}
}
- // Check vertex input state
- if (a->mAttributesSetMask != b->mAttributesSetMask) {
+ // Check vertex state
+ if (a->mAttributeLocationsUsed != b->mAttributeLocationsUsed) {
return false;
}
- for (uint32_t i : IterateBitSet(a->mAttributesSetMask)) {
+ for (uint32_t i : IterateBitSet(a->mAttributeLocationsUsed)) {
const VertexAttributeInfo& descA = a->GetAttribute(i);
const VertexAttributeInfo& descB = b->GetAttribute(i);
if (descA.shaderLocation != descB.shaderLocation ||
- descA.inputSlot != descB.inputSlot || descA.offset != descB.offset ||
+ descA.vertexBufferSlot != descB.vertexBufferSlot || descA.offset != descB.offset ||
descA.format != descB.format) {
return false;
}
}
- if (a->mInputsSetMask != b->mInputsSetMask) {
+ if (a->mVertexBufferSlotsUsed != b->mVertexBufferSlotsUsed) {
return false;
}
- for (uint32_t i : IterateBitSet(a->mInputsSetMask)) {
- const VertexBufferInfo& descA = a->GetInput(i);
- const VertexBufferInfo& descB = b->GetInput(i);
- if (descA.stride != descB.stride || descA.stepMode != descB.stepMode) {
+ for (uint32_t i : IterateBitSet(a->mVertexBufferSlotsUsed)) {
+ const VertexBufferInfo& descA = a->GetVertexBuffer(i);
+ const VertexBufferInfo& descB = b->GetVertexBuffer(i);
+ if (descA.arrayStride != descB.arrayStride || descA.stepMode != descB.stepMode) {
return false;
}
}
- if (a->mVertexInput.indexFormat != b->mVertexInput.indexFormat) {
+ if (a->mVertexState.indexFormat != b->mVertexState.indexFormat) {
return false;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h
index 490d178563d..9cc76dea82d 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h
@@ -28,61 +28,60 @@ namespace dawn_native {
struct BeginRenderPassCmd;
class DeviceBase;
- class RenderBundleEncoderBase;
+ class RenderBundleEncoder;
MaybeError ValidateRenderPipelineDescriptor(const DeviceBase* device,
const RenderPipelineDescriptor* descriptor);
- size_t IndexFormatSize(dawn::IndexFormat format);
- uint32_t VertexFormatNumComponents(dawn::VertexFormat format);
- size_t VertexFormatComponentSize(dawn::VertexFormat format);
- size_t VertexFormatSize(dawn::VertexFormat format);
+ size_t IndexFormatSize(wgpu::IndexFormat format);
+ uint32_t VertexFormatNumComponents(wgpu::VertexFormat format);
+ size_t VertexFormatComponentSize(wgpu::VertexFormat format);
+ size_t VertexFormatSize(wgpu::VertexFormat format);
bool StencilTestEnabled(const DepthStencilStateDescriptor* mDepthStencilState);
bool BlendEnabled(const ColorStateDescriptor* mColorState);
struct VertexAttributeInfo {
- uint32_t shaderLocation;
- uint32_t inputSlot;
+ wgpu::VertexFormat format;
uint64_t offset;
- dawn::VertexFormat format;
+ uint32_t shaderLocation;
+ uint32_t vertexBufferSlot;
};
struct VertexBufferInfo {
- uint64_t stride;
- dawn::InputStepMode stepMode;
+ uint64_t arrayStride;
+ wgpu::InputStepMode stepMode;
};
class RenderPipelineBase : public PipelineBase {
public:
- RenderPipelineBase(DeviceBase* device,
- const RenderPipelineDescriptor* descriptor,
- bool blueprint = false);
+ RenderPipelineBase(DeviceBase* device, const RenderPipelineDescriptor* descriptor);
~RenderPipelineBase() override;
static RenderPipelineBase* MakeError(DeviceBase* device);
- const VertexInputDescriptor* GetVertexInputDescriptor() const;
- const std::bitset<kMaxVertexAttributes>& GetAttributesSetMask() const;
+ const VertexStateDescriptor* GetVertexStateDescriptor() const;
+ const std::bitset<kMaxVertexAttributes>& GetAttributeLocationsUsed() const;
const VertexAttributeInfo& GetAttribute(uint32_t location) const;
- const std::bitset<kMaxVertexBuffers>& GetInputsSetMask() const;
- const VertexBufferInfo& GetInput(uint32_t slot) const;
+ const std::bitset<kMaxVertexBuffers>& GetVertexBufferSlotsUsed() const;
+ const VertexBufferInfo& GetVertexBuffer(uint32_t slot) const;
const ColorStateDescriptor* GetColorStateDescriptor(uint32_t attachmentSlot) const;
const DepthStencilStateDescriptor* GetDepthStencilStateDescriptor() const;
- dawn::PrimitiveTopology GetPrimitiveTopology() const;
- dawn::CullMode GetCullMode() const;
- dawn::FrontFace GetFrontFace() const;
+ wgpu::PrimitiveTopology GetPrimitiveTopology() const;
+ wgpu::CullMode GetCullMode() const;
+ wgpu::FrontFace GetFrontFace() const;
std::bitset<kMaxColorAttachments> GetColorAttachmentsMask() const;
bool HasDepthStencilAttachment() const;
- dawn::TextureFormat GetColorAttachmentFormat(uint32_t attachment) const;
- dawn::TextureFormat GetDepthStencilFormat() const;
+ wgpu::TextureFormat GetColorAttachmentFormat(uint32_t attachment) const;
+ wgpu::TextureFormat GetDepthStencilFormat() const;
uint32_t GetSampleCount() const;
const AttachmentState* GetAttachmentState() const;
- std::bitset<kMaxVertexAttributes> GetAttributesUsingInput(uint32_t slot) const;
- std::array<std::bitset<kMaxVertexAttributes>, kMaxVertexBuffers> attributesUsingInput;
+ std::bitset<kMaxVertexAttributes> GetAttributesUsingVertexBuffer(uint32_t slot) const;
+ std::array<std::bitset<kMaxVertexAttributes>, kMaxVertexBuffers>
+ attributesUsingVertexBuffer;
// Functors necessary for the unordered_set<RenderPipelineBase*>-based cache.
struct HashFunc {
@@ -95,12 +94,12 @@ namespace dawn_native {
private:
RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- // Vertex input
- VertexInputDescriptor mVertexInput;
- std::bitset<kMaxVertexAttributes> mAttributesSetMask;
+ // Vertex state
+ VertexStateDescriptor mVertexState;
+ std::bitset<kMaxVertexAttributes> mAttributeLocationsUsed;
std::array<VertexAttributeInfo, kMaxVertexAttributes> mAttributeInfos;
- std::bitset<kMaxVertexBuffers> mInputsSetMask;
- std::array<VertexBufferInfo, kMaxVertexBuffers> mInputInfos;
+ std::bitset<kMaxVertexBuffers> mVertexBufferSlotsUsed;
+ std::array<VertexBufferInfo, kMaxVertexBuffers> mVertexBufferInfos;
// Attachments
Ref<AttachmentState> mAttachmentState;
@@ -108,7 +107,7 @@ namespace dawn_native {
std::array<ColorStateDescriptor, kMaxColorAttachments> mColorStates;
// Other state
- dawn::PrimitiveTopology mPrimitiveTopology;
+ wgpu::PrimitiveTopology mPrimitiveTopology;
RasterizationStateDescriptor mRasterizationState;
uint32_t mSampleMask;
bool mAlphaToCoverageEnabled;
@@ -119,8 +118,6 @@ namespace dawn_native {
std::string mVertexEntryPoint;
Ref<ShaderModuleBase> mFragmentModule;
std::string mFragmentEntryPoint;
-
- bool mIsBlueprint = false;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/MemoryAllocator.h b/chromium/third_party/dawn/src/dawn_native/ResourceHeapAllocator.h
index e932c91cd50..1b0fd621de6 100644
--- a/chromium/third_party/dawn/src/dawn_native/MemoryAllocator.h
+++ b/chromium/third_party/dawn/src/dawn_native/ResourceHeapAllocator.h
@@ -12,22 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#ifndef DAWNNATIVE_MEMORYALLOCATOR_H_
-#define DAWNNATIVE_MEMORYALLOCATOR_H_
+#ifndef DAWNNATIVE_RESOURCEHEAPALLOCATOR_H_
+#define DAWNNATIVE_RESOURCEHEAPALLOCATOR_H_
#include "dawn_native/Error.h"
#include "dawn_native/ResourceHeap.h"
+#include <memory>
+
namespace dawn_native {
- // Interface for backend allocators that create physical device memory.
- class MemoryAllocator {
+
+ // Interface for backend allocators that create memory heaps resoruces can be suballocated in.
+ class ResourceHeapAllocator {
public:
- virtual ~MemoryAllocator() = default;
+ virtual ~ResourceHeapAllocator() = default;
- virtual ResultOrError<std::unique_ptr<ResourceHeapBase>> Allocate(uint64_t size,
- int memoryFlags) = 0;
- virtual void Deallocate(std::unique_ptr<ResourceHeapBase> allocation) = 0;
+ virtual ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
+ uint64_t size) = 0;
+ virtual void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) = 0;
};
+
} // namespace dawn_native
-#endif // DAWNNATIVE_MEMORYALLOCATOR_H_
+#endif // DAWNNATIVE_RESOURCEHEAPALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h b/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h
index 12b42a3ee44..a66129ac649 100644
--- a/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h
+++ b/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.h
@@ -54,14 +54,14 @@ namespace dawn_native {
uint64_t offset,
ResourceHeapBase* resourceHeap,
uint8_t* mappedPointer = nullptr);
- ~ResourceMemoryAllocation() = default;
+ virtual ~ResourceMemoryAllocation() = default;
ResourceHeapBase* GetResourceHeap() const;
uint64_t GetOffset() const;
uint8_t* GetMappedPointer() const;
AllocationInfo GetInfo() const;
- void Invalidate();
+ virtual void Invalidate();
private:
AllocationInfo mInfo;
@@ -71,4 +71,4 @@ namespace dawn_native {
};
} // namespace dawn_native
-#endif // DAWNNATIVE_RESOURCEMEMORYALLOCATION_H_ \ No newline at end of file
+#endif // DAWNNATIVE_RESOURCEMEMORYALLOCATION_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.cpp
index 6cb94b70489..6c3eef0ce03 100644
--- a/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.cpp
@@ -29,7 +29,7 @@
// TODO(bryan.bernhart@intel.com): Follow-up with ringbuffer optimization.
namespace dawn_native {
- RingBufferAllocator::RingBufferAllocator(size_t maxSize) : mMaxBlockSize(maxSize) {
+ RingBufferAllocator::RingBufferAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
}
void RingBufferAllocator::Deallocate(Serial lastCompletedSerial) {
@@ -43,11 +43,11 @@ namespace dawn_native {
mInflightRequests.ClearUpTo(lastCompletedSerial);
}
- size_t RingBufferAllocator::GetSize() const {
+ uint64_t RingBufferAllocator::GetSize() const {
return mMaxBlockSize;
}
- size_t RingBufferAllocator::GetUsedSize() const {
+ uint64_t RingBufferAllocator::GetUsedSize() const {
return mUsedSize;
}
@@ -62,7 +62,7 @@ namespace dawn_native {
// queue, which identifies an existing (or new) frames-worth of resources. Internally, the
// ring-buffer maintains offsets of 3 "memory" states: Free, Reclaimed, and Used. This is done
// in FIFO order as older frames would free resources before newer ones.
- size_t RingBufferAllocator::Allocate(size_t allocationSize, Serial serial) {
+ uint64_t RingBufferAllocator::Allocate(uint64_t allocationSize, Serial serial) {
// Check if the buffer is full by comparing the used size.
// If the buffer is not split where waste occurs (e.g. cannot fit new sub-alloc in front), a
// subsequent sub-alloc could fail where the used size was previously adjusted to include
@@ -71,7 +71,13 @@ namespace dawn_native {
return kInvalidOffset;
}
- size_t startOffset = kInvalidOffset;
+ // Ensure adding allocationSize does not overflow.
+ const uint64_t remainingSize = (mMaxBlockSize - mUsedSize);
+ if (allocationSize > remainingSize) {
+ return kInvalidOffset;
+ }
+
+ uint64_t startOffset = kInvalidOffset;
// Check if the buffer is NOT split (i.e sub-alloc on ends)
if (mUsedStartOffset <= mUsedEndOffset) {
@@ -86,7 +92,7 @@ namespace dawn_native {
} else if (allocationSize <= mUsedStartOffset) { // Try to sub-alloc at front.
// Count the space at the end so that a subsequent
// sub-alloc cannot not succeed when the buffer is full.
- const size_t requestSize = (mMaxBlockSize - mUsedEndOffset) + allocationSize;
+ const uint64_t requestSize = (mMaxBlockSize - mUsedEndOffset) + allocationSize;
startOffset = 0;
mUsedEndOffset = allocationSize;
diff --git a/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h b/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h
index 60ee6395c45..e437632d7c4 100644
--- a/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h
+++ b/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h
@@ -26,32 +26,32 @@ namespace dawn_native {
class RingBufferAllocator {
public:
RingBufferAllocator() = default;
- RingBufferAllocator(size_t maxSize);
+ RingBufferAllocator(uint64_t maxSize);
~RingBufferAllocator() = default;
- size_t Allocate(size_t allocationSize, Serial serial);
+ uint64_t Allocate(uint64_t allocationSize, Serial serial);
void Deallocate(Serial lastCompletedSerial);
- size_t GetSize() const;
+ uint64_t GetSize() const;
bool Empty() const;
- size_t GetUsedSize() const;
+ uint64_t GetUsedSize() const;
- static constexpr size_t kInvalidOffset = std::numeric_limits<size_t>::max();
+ static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
private:
struct Request {
- size_t endOffset;
- size_t size;
+ uint64_t endOffset;
+ uint64_t size;
};
SerialQueue<Request> mInflightRequests; // Queue of the recorded sub-alloc requests (e.g.
// frame of resources).
- size_t mUsedEndOffset = 0; // Tail of used sub-alloc requests (in bytes).
- size_t mUsedStartOffset = 0; // Head of used sub-alloc requests (in bytes).
- size_t mMaxBlockSize = 0; // Max size of the ring buffer (in bytes).
- size_t mUsedSize = 0; // Size of the sub-alloc requests (in bytes) of the ring buffer.
- size_t mCurrentRequestSize =
+ uint64_t mUsedEndOffset = 0; // Tail of used sub-alloc requests (in bytes).
+ uint64_t mUsedStartOffset = 0; // Head of used sub-alloc requests (in bytes).
+ uint64_t mMaxBlockSize = 0; // Max size of the ring buffer (in bytes).
+ uint64_t mUsedSize = 0; // Size of the sub-alloc requests (in bytes) of the ring buffer.
+ uint64_t mCurrentRequestSize =
0; // Size of the sub-alloc requests (in bytes) of the current serial.
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Sampler.cpp b/chromium/third_party/dawn/src/dawn_native/Sampler.cpp
index 749624090ae..230da340e02 100644
--- a/chromium/third_party/dawn/src/dawn_native/Sampler.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Sampler.cpp
@@ -52,10 +52,8 @@ namespace dawn_native {
// SamplerBase
- SamplerBase::SamplerBase(DeviceBase* device,
- const SamplerDescriptor* descriptor,
- bool blueprint)
- : ObjectBase(device),
+ SamplerBase::SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor)
+ : CachedObject(device),
mAddressModeU(descriptor->addressModeU),
mAddressModeV(descriptor->addressModeV),
mAddressModeW(descriptor->addressModeW),
@@ -64,17 +62,15 @@ namespace dawn_native {
mMipmapFilter(descriptor->mipmapFilter),
mLodMinClamp(descriptor->lodMinClamp),
mLodMaxClamp(descriptor->lodMaxClamp),
- mCompareFunction(descriptor->compare),
- mIsBlueprint(blueprint) {
+ mCompareFunction(descriptor->compare) {
}
SamplerBase::SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag) {
+ : CachedObject(device, tag) {
}
SamplerBase::~SamplerBase() {
- // Do not uncache the actual cached object if we are a blueprint
- if (!mIsBlueprint && !IsError()) {
+ if (IsCachedReference()) {
GetDevice()->UncacheSampler(this);
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Sampler.h b/chromium/third_party/dawn/src/dawn_native/Sampler.h
index 202f3cde1f4..a642422e785 100644
--- a/chromium/third_party/dawn/src/dawn_native/Sampler.h
+++ b/chromium/third_party/dawn/src/dawn_native/Sampler.h
@@ -15,8 +15,8 @@
#ifndef DAWNNATIVE_SAMPLER_H_
#define DAWNNATIVE_SAMPLER_H_
+#include "dawn_native/CachedObject.h"
#include "dawn_native/Error.h"
-#include "dawn_native/ObjectBase.h"
#include "dawn_native/dawn_platform.h"
@@ -26,11 +26,9 @@ namespace dawn_native {
MaybeError ValidateSamplerDescriptor(DeviceBase* device, const SamplerDescriptor* descriptor);
- class SamplerBase : public ObjectBase {
+ class SamplerBase : public CachedObject {
public:
- SamplerBase(DeviceBase* device,
- const SamplerDescriptor* descriptor,
- bool blueprint = false);
+ SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor);
~SamplerBase() override;
static SamplerBase* MakeError(DeviceBase* device);
@@ -47,16 +45,15 @@ namespace dawn_native {
SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag);
// TODO(cwallez@chromium.org): Store a crypto hash of the items instead?
- dawn::AddressMode mAddressModeU;
- dawn::AddressMode mAddressModeV;
- dawn::AddressMode mAddressModeW;
- dawn::FilterMode mMagFilter;
- dawn::FilterMode mMinFilter;
- dawn::FilterMode mMipmapFilter;
+ wgpu::AddressMode mAddressModeU;
+ wgpu::AddressMode mAddressModeV;
+ wgpu::AddressMode mAddressModeW;
+ wgpu::FilterMode mMagFilter;
+ wgpu::FilterMode mMinFilter;
+ wgpu::FilterMode mMipmapFilter;
float mLodMinClamp;
float mLodMaxClamp;
- dawn::CompareFunction mCompareFunction;
- bool mIsBlueprint = false;
+ wgpu::CompareFunction mCompareFunction;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
index f19a123b344..dfa9acf4a92 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
@@ -41,6 +41,31 @@ namespace dawn_native {
return Format::Other;
}
}
+
+ wgpu::TextureViewDimension SpirvDimToTextureViewDimension(spv::Dim dim, bool arrayed) {
+ switch (dim) {
+ case spv::Dim::Dim1D:
+ return wgpu::TextureViewDimension::e1D;
+ case spv::Dim::Dim2D:
+ if (arrayed) {
+ return wgpu::TextureViewDimension::e2DArray;
+ } else {
+ return wgpu::TextureViewDimension::e2D;
+ }
+ case spv::Dim::Dim3D:
+ return wgpu::TextureViewDimension::e3D;
+ case spv::Dim::DimCube:
+ if (arrayed) {
+ return wgpu::TextureViewDimension::CubeArray;
+ } else {
+ return wgpu::TextureViewDimension::Cube;
+ }
+ default:
+ UNREACHABLE();
+ return wgpu::TextureViewDimension::Undefined;
+ }
+ }
+
} // anonymous namespace
MaybeError ValidateShaderModuleDescriptor(DeviceBase*,
@@ -84,22 +109,17 @@ namespace dawn_native {
// ShaderModuleBase
- ShaderModuleBase::ShaderModuleBase(DeviceBase* device,
- const ShaderModuleDescriptor* descriptor,
- bool blueprint)
- : ObjectBase(device),
- mCode(descriptor->code, descriptor->code + descriptor->codeSize),
- mIsBlueprint(blueprint) {
+ ShaderModuleBase::ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor)
+ : CachedObject(device), mCode(descriptor->code, descriptor->code + descriptor->codeSize) {
mFragmentOutputFormatBaseTypes.fill(Format::Other);
}
ShaderModuleBase::ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag) {
+ : CachedObject(device, tag) {
}
ShaderModuleBase::~ShaderModuleBase() {
- // Do not uncache the actual cached object if we are a blueprint
- if (!mIsBlueprint && !IsError()) {
+ if (IsCachedReference()) {
GetDevice()->UncacheShaderModule(this);
}
}
@@ -132,7 +152,7 @@ namespace dawn_native {
}
if (resources.push_constant_buffers.size() > 0) {
- GetDevice()->HandleError(dawn::ErrorType::Validation,
+ GetDevice()->HandleError(wgpu::ErrorType::Validation,
"Push constants aren't supported.");
}
@@ -140,7 +160,7 @@ namespace dawn_native {
auto ExtractResourcesBinding = [this](const spirv_cross::SmallVector<spirv_cross::Resource>&
resources,
const spirv_cross::Compiler& compiler,
- dawn::BindingType bindingType) {
+ wgpu::BindingType bindingType) {
for (const auto& resource : resources) {
ASSERT(compiler.get_decoration_bitset(resource.id).get(spv::DecorationBinding));
ASSERT(
@@ -150,26 +170,53 @@ namespace dawn_native {
uint32_t set = compiler.get_decoration(resource.id, spv::DecorationDescriptorSet);
if (binding >= kMaxBindingsPerGroup || set >= kMaxBindGroups) {
- GetDevice()->HandleError(dawn::ErrorType::Validation,
+ GetDevice()->HandleError(wgpu::ErrorType::Validation,
"Binding over limits in the SPIRV");
continue;
}
- auto& info = mBindingInfo[set][binding];
- info.used = true;
- info.id = resource.id;
- info.base_type_id = resource.base_type_id;
- info.type = bindingType;
+ BindingInfo* info = &mBindingInfo[set][binding];
+ *info = {};
+ info->used = true;
+ info->id = resource.id;
+ info->base_type_id = resource.base_type_id;
+ switch (bindingType) {
+ case wgpu::BindingType::SampledTexture: {
+ spirv_cross::SPIRType::ImageType imageType =
+ compiler.get_type(info->base_type_id).image;
+ spirv_cross::SPIRType::BaseType textureComponentType =
+ compiler.get_type(imageType.type).basetype;
+
+ info->multisampled = imageType.ms;
+ info->textureDimension =
+ SpirvDimToTextureViewDimension(imageType.dim, imageType.arrayed);
+ info->textureComponentType =
+ SpirvCrossBaseTypeToFormatType(textureComponentType);
+ info->type = bindingType;
+ } break;
+ case wgpu::BindingType::StorageBuffer: {
+ // Differentiate between readonly storage bindings and writable ones based
+ // on the NonWritable decoration
+ spirv_cross::Bitset flags = compiler.get_buffer_block_flags(resource.id);
+ if (flags.get(spv::DecorationNonWritable)) {
+ info->type = wgpu::BindingType::ReadonlyStorageBuffer;
+ } else {
+ info->type = wgpu::BindingType::StorageBuffer;
+ }
+ } break;
+ default:
+ info->type = bindingType;
+ }
}
};
ExtractResourcesBinding(resources.uniform_buffers, compiler,
- dawn::BindingType::UniformBuffer);
+ wgpu::BindingType::UniformBuffer);
ExtractResourcesBinding(resources.separate_images, compiler,
- dawn::BindingType::SampledTexture);
- ExtractResourcesBinding(resources.separate_samplers, compiler, dawn::BindingType::Sampler);
+ wgpu::BindingType::SampledTexture);
+ ExtractResourcesBinding(resources.separate_samplers, compiler, wgpu::BindingType::Sampler);
ExtractResourcesBinding(resources.storage_buffers, compiler,
- dawn::BindingType::StorageBuffer);
+ wgpu::BindingType::StorageBuffer);
// Extract the vertex attributes
if (mExecutionModel == SingleShaderStage::Vertex) {
@@ -178,7 +225,7 @@ namespace dawn_native {
uint32_t location = compiler.get_decoration(attrib.id, spv::DecorationLocation);
if (location >= kMaxVertexAttributes) {
- device->HandleError(dawn::ErrorType::Validation,
+ device->HandleError(wgpu::ErrorType::Validation,
"Attribute location over limits in the SPIRV");
return;
}
@@ -190,7 +237,7 @@ namespace dawn_native {
// all the location 0, causing a compile error.
for (const auto& attrib : resources.stage_outputs) {
if (!compiler.get_decoration_bitset(attrib.id).get(spv::DecorationLocation)) {
- device->HandleError(dawn::ErrorType::Validation,
+ device->HandleError(wgpu::ErrorType::Validation,
"Need location qualifier on vertex output");
return;
}
@@ -202,7 +249,7 @@ namespace dawn_native {
// all the location 0, causing a compile error.
for (const auto& attrib : resources.stage_inputs) {
if (!compiler.get_decoration_bitset(attrib.id).get(spv::DecorationLocation)) {
- device->HandleError(dawn::ErrorType::Validation,
+ device->HandleError(wgpu::ErrorType::Validation,
"Need location qualifier on fragment input");
return;
}
@@ -214,7 +261,7 @@ namespace dawn_native {
uint32_t location =
compiler.get_decoration(fragmentOutput.id, spv::DecorationLocation);
if (location >= kMaxColorAttachments) {
- device->HandleError(dawn::ErrorType::Validation,
+ device->HandleError(wgpu::ErrorType::Validation,
"Fragment output location over limits in the SPIRV");
return;
}
@@ -250,7 +297,7 @@ namespace dawn_native {
return mExecutionModel;
}
- bool ShaderModuleBase::IsCompatibleWithPipelineLayout(const PipelineLayoutBase* layout) {
+ bool ShaderModuleBase::IsCompatibleWithPipelineLayout(const PipelineLayoutBase* layout) const {
ASSERT(!IsError());
for (uint32_t group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
@@ -270,8 +317,9 @@ namespace dawn_native {
return true;
}
- bool ShaderModuleBase::IsCompatibleWithBindGroupLayout(size_t group,
- const BindGroupLayoutBase* layout) {
+ bool ShaderModuleBase::IsCompatibleWithBindGroupLayout(
+ size_t group,
+ const BindGroupLayoutBase* layout) const {
ASSERT(!IsError());
const auto& layoutInfo = layout->GetBindingInfo();
@@ -284,12 +332,33 @@ namespace dawn_native {
}
if (layoutBindingType != moduleInfo.type) {
- return false;
+ // Binding mismatch between shader and bind group is invalid. For example, a
+ // writable binding in the shader with a readonly storage buffer in the bind group
+ // layout is invalid. However, a readonly binding in the shader with a writable
+ // storage buffer in the bind group layout is valid.
+ bool validBindingConversion =
+ layoutBindingType == wgpu::BindingType::StorageBuffer &&
+ moduleInfo.type == wgpu::BindingType::ReadonlyStorageBuffer;
+ if (!validBindingConversion) {
+ return false;
+ }
}
if ((layoutInfo.visibilities[i] & StageBit(mExecutionModel)) == 0) {
return false;
}
+
+ if (layoutBindingType == wgpu::BindingType::SampledTexture) {
+ Format::Type layoutTextureComponentType =
+ Format::TextureComponentTypeToFormatType(layoutInfo.textureComponentTypes[i]);
+ if (layoutTextureComponentType != moduleInfo.textureComponentType) {
+ return false;
+ }
+
+ if (layoutInfo.textureDimensions[i] != moduleInfo.textureDimension) {
+ return false;
+ }
+ }
}
return true;
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
index f2c133c8353..da69c4d98c6 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
@@ -16,14 +16,16 @@
#define DAWNNATIVE_SHADERMODULE_H_
#include "common/Constants.h"
+#include "dawn_native/CachedObject.h"
#include "dawn_native/Error.h"
#include "dawn_native/Format.h"
#include "dawn_native/Forward.h"
-#include "dawn_native/ObjectBase.h"
#include "dawn_native/PerStage.h"
#include "dawn_native/dawn_platform.h"
+#include "spvc/spvc.hpp"
+
#include <array>
#include <bitset>
#include <vector>
@@ -37,11 +39,9 @@ namespace dawn_native {
MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
const ShaderModuleDescriptor* descriptor);
- class ShaderModuleBase : public ObjectBase {
+ class ShaderModuleBase : public CachedObject {
public:
- ShaderModuleBase(DeviceBase* device,
- const ShaderModuleDescriptor* descriptor,
- bool blueprint = false);
+ ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor);
~ShaderModuleBase() override;
static ShaderModuleBase* MakeError(DeviceBase* device);
@@ -52,7 +52,11 @@ namespace dawn_native {
// The SPIRV ID of the resource.
uint32_t id;
uint32_t base_type_id;
- dawn::BindingType type;
+ wgpu::BindingType type;
+ // Match the defaults in BindGroupLayoutDescriptor
+ wgpu::TextureViewDimension textureDimension = wgpu::TextureViewDimension::Undefined;
+ Format::Type textureComponentType = Format::Type::Float;
+ bool multisampled = false;
bool used = false;
};
using ModuleBindingInfo =
@@ -67,7 +71,7 @@ namespace dawn_native {
using FragmentOutputBaseTypes = std::array<Format::Type, kMaxColorAttachments>;
const FragmentOutputBaseTypes& GetFragmentOutputBaseTypes() const;
- bool IsCompatibleWithPipelineLayout(const PipelineLayoutBase* layout);
+ bool IsCompatibleWithPipelineLayout(const PipelineLayoutBase* layout) const;
// Functors necessary for the unordered_set<ShaderModuleBase*>-based cache.
struct HashFunc {
@@ -77,15 +81,17 @@ namespace dawn_native {
bool operator()(const ShaderModuleBase* a, const ShaderModuleBase* b) const;
};
+ protected:
+ shaderc_spvc::Context mSpvcContext;
+
private:
ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- bool IsCompatibleWithBindGroupLayout(size_t group, const BindGroupLayoutBase* layout);
+ bool IsCompatibleWithBindGroupLayout(size_t group, const BindGroupLayoutBase* layout) const;
// TODO(cwallez@chromium.org): The code is only stored for deduplication. We could maybe
// store a cryptographic hash of the code instead?
std::vector<uint32_t> mCode;
- bool mIsBlueprint = false;
ModuleBindingInfo mBindingInfo;
std::bitset<kMaxVertexAttributes> mUsedVertexAttributes;
diff --git a/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp b/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
index 8194fe15462..37549d38c53 100644
--- a/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/SwapChain.cpp
@@ -80,8 +80,8 @@ namespace dawn_native {
return new ErrorSwapChain(device);
}
- void SwapChainBase::Configure(dawn::TextureFormat format,
- dawn::TextureUsage allowedUsage,
+ void SwapChainBase::Configure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
uint32_t width,
uint32_t height) {
if (GetDevice()->ConsumedError(ValidateConfigure(format, allowedUsage, width, height))) {
@@ -89,24 +89,33 @@ namespace dawn_native {
}
ASSERT(!IsError());
- allowedUsage |= dawn::TextureUsage::Present;
+ allowedUsage |= wgpu::TextureUsage::Present;
mFormat = format;
mAllowedUsage = allowedUsage;
mWidth = width;
mHeight = height;
- mImplementation.Configure(mImplementation.userData, static_cast<DawnTextureFormat>(format),
- static_cast<DawnTextureUsage>(allowedUsage), width, height);
+ mImplementation.Configure(mImplementation.userData, static_cast<WGPUTextureFormat>(format),
+ static_cast<WGPUTextureUsage>(allowedUsage), width, height);
}
- TextureBase* SwapChainBase::GetNextTexture() {
- if (GetDevice()->ConsumedError(ValidateGetNextTexture())) {
- return TextureBase::MakeError(GetDevice());
+ TextureViewBase* SwapChainBase::GetCurrentTextureView() {
+ if (GetDevice()->ConsumedError(ValidateGetCurrentTextureView())) {
+ return TextureViewBase::MakeError(GetDevice());
}
ASSERT(!IsError());
+ // Return the same current texture view until Present is called.
+ if (mCurrentTextureView.Get() != nullptr) {
+ // Calling GetCurrentTextureView always returns a new reference so add it even when
+ // reuse the existing texture view.
+ mCurrentTextureView->Reference();
+ return mCurrentTextureView.Get();
+ }
+
+ // Create the backing texture and the view.
TextureDescriptor descriptor;
- descriptor.dimension = dawn::TextureDimension::e2D;
+ descriptor.dimension = wgpu::TextureDimension::e2D;
descriptor.size.width = mWidth;
descriptor.size.height = mHeight;
descriptor.size.depth = 1;
@@ -116,21 +125,28 @@ namespace dawn_native {
descriptor.mipLevelCount = 1;
descriptor.usage = mAllowedUsage;
- auto* texture = GetNextTextureImpl(&descriptor);
- mLastNextTexture = texture;
- return texture;
+ // Get the texture but remove the external refcount because it is never passed outside
+ // of dawn_native
+ mCurrentTexture = AcquireRef(GetNextTextureImpl(&descriptor));
+
+ mCurrentTextureView = mCurrentTexture->CreateView(nullptr);
+ return mCurrentTextureView.Get();
}
- void SwapChainBase::Present(TextureBase* texture) {
- if (GetDevice()->ConsumedError(ValidatePresent(texture))) {
+ void SwapChainBase::Present() {
+ if (GetDevice()->ConsumedError(ValidatePresent())) {
return;
}
ASSERT(!IsError());
- if (GetDevice()->ConsumedError(OnBeforePresent(texture)))
+ if (GetDevice()->ConsumedError(OnBeforePresent(mCurrentTexture.Get()))) {
return;
+ }
mImplementation.Present(mImplementation.userData);
+
+ mCurrentTexture = nullptr;
+ mCurrentTextureView = nullptr;
}
const DawnSwapChainImplementation& SwapChainBase::GetImplementation() {
@@ -138,8 +154,8 @@ namespace dawn_native {
return mImplementation;
}
- MaybeError SwapChainBase::ValidateConfigure(dawn::TextureFormat format,
- dawn::TextureUsage allowedUsage,
+ MaybeError SwapChainBase::ValidateConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
uint32_t width,
uint32_t height) const {
DAWN_TRY(GetDevice()->ValidateObject(this));
@@ -154,7 +170,7 @@ namespace dawn_native {
return {};
}
- MaybeError SwapChainBase::ValidateGetNextTexture() const {
+ MaybeError SwapChainBase::ValidateGetCurrentTextureView() const {
DAWN_TRY(GetDevice()->ValidateObject(this));
if (mWidth == 0) {
@@ -165,14 +181,12 @@ namespace dawn_native {
return {};
}
- MaybeError SwapChainBase::ValidatePresent(TextureBase* texture) const {
+ MaybeError SwapChainBase::ValidatePresent() const {
DAWN_TRY(GetDevice()->ValidateObject(this));
- DAWN_TRY(GetDevice()->ValidateObject(texture));
- // This also checks that the texture is valid since mLastNextTexture is always valid.
- if (texture != mLastNextTexture) {
+ if (mCurrentTextureView.Get() == nullptr) {
return DAWN_VALIDATION_ERROR(
- "Tried to present something other than the last NextTexture");
+ "Cannot call present without a GetCurrentTextureView call for this frame");
}
return {};
diff --git a/chromium/third_party/dawn/src/dawn_native/SwapChain.h b/chromium/third_party/dawn/src/dawn_native/SwapChain.h
index c9b65028796..7742293aec3 100644
--- a/chromium/third_party/dawn/src/dawn_native/SwapChain.h
+++ b/chromium/third_party/dawn/src/dawn_native/SwapChain.h
@@ -35,12 +35,12 @@ namespace dawn_native {
static SwapChainBase* MakeError(DeviceBase* device);
// Dawn API
- void Configure(dawn::TextureFormat format,
- dawn::TextureUsage allowedUsage,
+ void Configure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
uint32_t width,
uint32_t height);
- TextureBase* GetNextTexture();
- void Present(TextureBase* texture);
+ TextureViewBase* GetCurrentTextureView();
+ void Present();
protected:
SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag);
@@ -50,19 +50,20 @@ namespace dawn_native {
virtual MaybeError OnBeforePresent(TextureBase* texture) = 0;
private:
- MaybeError ValidateConfigure(dawn::TextureFormat format,
- dawn::TextureUsage allowedUsage,
+ MaybeError ValidateConfigure(wgpu::TextureFormat format,
+ wgpu::TextureUsage allowedUsage,
uint32_t width,
uint32_t height) const;
- MaybeError ValidateGetNextTexture() const;
- MaybeError ValidatePresent(TextureBase* texture) const;
+ MaybeError ValidateGetCurrentTextureView() const;
+ MaybeError ValidatePresent() const;
DawnSwapChainImplementation mImplementation = {};
- dawn::TextureFormat mFormat = {};
- dawn::TextureUsage mAllowedUsage;
+ wgpu::TextureFormat mFormat = {};
+ wgpu::TextureUsage mAllowedUsage;
uint32_t mWidth = 0;
uint32_t mHeight = 0;
- TextureBase* mLastNextTexture = nullptr;
+ Ref<TextureBase> mCurrentTexture;
+ Ref<TextureViewBase> mCurrentTextureView;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.cpp b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
index bcaa923d914..b6a50d67c26 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
@@ -37,14 +37,14 @@ namespace dawn_native {
// TODO(jiawei.shao@intel.com): support validation on all texture view dimensions
bool IsTextureViewDimensionCompatibleWithTextureDimension(
- dawn::TextureViewDimension textureViewDimension,
- dawn::TextureDimension textureDimension) {
+ wgpu::TextureViewDimension textureViewDimension,
+ wgpu::TextureDimension textureDimension) {
switch (textureViewDimension) {
- case dawn::TextureViewDimension::e2D:
- case dawn::TextureViewDimension::e2DArray:
- case dawn::TextureViewDimension::Cube:
- case dawn::TextureViewDimension::CubeArray:
- return textureDimension == dawn::TextureDimension::e2D;
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ return textureDimension == wgpu::TextureDimension::e2D;
default:
UNREACHABLE();
return false;
@@ -53,16 +53,16 @@ namespace dawn_native {
// TODO(jiawei.shao@intel.com): support validation on all texture view dimensions
bool IsArrayLayerValidForTextureViewDimension(
- dawn::TextureViewDimension textureViewDimension,
+ wgpu::TextureViewDimension textureViewDimension,
uint32_t textureViewArrayLayer) {
switch (textureViewDimension) {
- case dawn::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2D:
return textureViewArrayLayer == 1u;
- case dawn::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::e2DArray:
return true;
- case dawn::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::Cube:
return textureViewArrayLayer == 6u;
- case dawn::TextureViewDimension::CubeArray:
+ case wgpu::TextureViewDimension::CubeArray:
return textureViewArrayLayer % 6 == 0;
default:
UNREACHABLE();
@@ -71,14 +71,14 @@ namespace dawn_native {
}
bool IsTextureSizeValidForTextureViewDimension(
- dawn::TextureViewDimension textureViewDimension,
+ wgpu::TextureViewDimension textureViewDimension,
const Extent3D& textureSize) {
switch (textureViewDimension) {
- case dawn::TextureViewDimension::Cube:
- case dawn::TextureViewDimension::CubeArray:
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
return textureSize.width == textureSize.height;
- case dawn::TextureViewDimension::e2D:
- case dawn::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
return true;
default:
UNREACHABLE();
@@ -159,21 +159,21 @@ namespace dawn_native {
MaybeError ValidateTextureUsage(const TextureDescriptor* descriptor, const Format* format) {
DAWN_TRY(dawn_native::ValidateTextureUsage(descriptor->usage));
- constexpr dawn::TextureUsage kValidCompressedUsages = dawn::TextureUsage::Sampled |
- dawn::TextureUsage::CopySrc |
- dawn::TextureUsage::CopyDst;
+ constexpr wgpu::TextureUsage kValidCompressedUsages = wgpu::TextureUsage::Sampled |
+ wgpu::TextureUsage::CopySrc |
+ wgpu::TextureUsage::CopyDst;
if (format->isCompressed && (descriptor->usage & (~kValidCompressedUsages))) {
return DAWN_VALIDATION_ERROR(
"Compressed texture format is incompatible with the texture usage");
}
if (!format->isRenderable &&
- (descriptor->usage & dawn::TextureUsage::OutputAttachment)) {
+ (descriptor->usage & wgpu::TextureUsage::OutputAttachment)) {
return DAWN_VALIDATION_ERROR(
"Non-renderable format used with OutputAttachment usage");
}
- if (descriptor->usage & dawn::TextureUsage::Storage) {
+ if (descriptor->usage & wgpu::TextureUsage::Storage) {
return DAWN_VALIDATION_ERROR("storage textures aren't supported (yet)");
}
@@ -205,7 +205,7 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Cannot create an empty texture");
}
- if (descriptor->dimension != dawn::TextureDimension::e2D) {
+ if (descriptor->dimension != wgpu::TextureDimension::e2D) {
return DAWN_VALIDATION_ERROR("Texture dimension must be 2D (for now)");
}
@@ -228,15 +228,15 @@ namespace dawn_native {
}
DAWN_TRY(ValidateTextureViewDimension(descriptor->dimension));
- if (descriptor->dimension == dawn::TextureViewDimension::e1D ||
- descriptor->dimension == dawn::TextureViewDimension::e3D) {
+ if (descriptor->dimension == wgpu::TextureViewDimension::e1D ||
+ descriptor->dimension == wgpu::TextureViewDimension::e3D) {
return DAWN_VALIDATION_ERROR("Texture view dimension must be 2D compatible.");
}
DAWN_TRY(ValidateTextureFormat(descriptor->format));
DAWN_TRY(ValidateTextureAspect(descriptor->aspect));
- if (descriptor->aspect != dawn::TextureAspect::All) {
+ if (descriptor->aspect != wgpu::TextureAspect::All) {
return DAWN_VALIDATION_ERROR("Texture aspect must be 'all'");
}
@@ -273,22 +273,22 @@ namespace dawn_native {
// The default value for the view dimension depends on the texture's dimension with a
// special case for 2DArray being chosen automatically if arrayLayerCount is unspecified.
- if (desc.dimension == dawn::TextureViewDimension::Undefined) {
+ if (desc.dimension == wgpu::TextureViewDimension::Undefined) {
switch (texture->GetDimension()) {
- case dawn::TextureDimension::e1D:
- desc.dimension = dawn::TextureViewDimension::e1D;
+ case wgpu::TextureDimension::e1D:
+ desc.dimension = wgpu::TextureViewDimension::e1D;
break;
- case dawn::TextureDimension::e2D:
+ case wgpu::TextureDimension::e2D:
if (texture->GetArrayLayers() > 1u && desc.arrayLayerCount == 0) {
- desc.dimension = dawn::TextureViewDimension::e2DArray;
+ desc.dimension = wgpu::TextureViewDimension::e2DArray;
} else {
- desc.dimension = dawn::TextureViewDimension::e2D;
+ desc.dimension = wgpu::TextureViewDimension::e2D;
}
break;
- case dawn::TextureDimension::e3D:
- desc.dimension = dawn::TextureViewDimension::e3D;
+ case wgpu::TextureDimension::e3D:
+ desc.dimension = wgpu::TextureViewDimension::e3D;
break;
default:
@@ -296,7 +296,7 @@ namespace dawn_native {
}
}
- if (desc.format == dawn::TextureFormat::Undefined) {
+ if (desc.format == wgpu::TextureFormat::Undefined) {
desc.format = texture->GetFormat().format;
}
if (desc.arrayLayerCount == 0) {
@@ -349,7 +349,7 @@ namespace dawn_native {
return new TextureBase(device, ObjectBase::kError);
}
- dawn::TextureDimension TextureBase::GetDimension() const {
+ wgpu::TextureDimension TextureBase::GetDimension() const {
ASSERT(!IsError());
return mDimension;
}
@@ -375,7 +375,7 @@ namespace dawn_native {
ASSERT(!IsError());
return mSampleCount;
}
- dawn::TextureUsage TextureBase::GetUsage() const {
+ wgpu::TextureUsage TextureBase::GetUsage() const {
ASSERT(!IsError());
return mUsage;
}
@@ -481,9 +481,7 @@ namespace dawn_native {
}
void TextureBase::DestroyInternal() {
- if (mState == TextureState::OwnedInternal) {
- DestroyImpl();
- }
+ DestroyImpl();
mState = TextureState::Destroyed;
}
@@ -529,7 +527,7 @@ namespace dawn_native {
return mFormat;
}
- dawn::TextureViewDimension TextureViewBase::GetDimension() const {
+ wgpu::TextureViewDimension TextureViewBase::GetDimension() const {
ASSERT(!IsError());
return mDimension;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.h b/chromium/third_party/dawn/src/dawn_native/Texture.h
index fafd46edcca..24a894f4ccd 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.h
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.h
@@ -34,12 +34,12 @@ namespace dawn_native {
bool IsValidSampleCount(uint32_t sampleCount);
- static constexpr dawn::TextureUsage kReadOnlyTextureUsages =
- dawn::TextureUsage::CopySrc | dawn::TextureUsage::Sampled | dawn::TextureUsage::Present;
+ static constexpr wgpu::TextureUsage kReadOnlyTextureUsages =
+ wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::Sampled | wgpu::TextureUsage::Present;
- static constexpr dawn::TextureUsage kWritableTextureUsages =
- dawn::TextureUsage::CopyDst | dawn::TextureUsage::Storage |
- dawn::TextureUsage::OutputAttachment;
+ static constexpr wgpu::TextureUsage kWritableTextureUsages =
+ wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::Storage |
+ wgpu::TextureUsage::OutputAttachment;
class TextureBase : public ObjectBase {
public:
@@ -49,13 +49,13 @@ namespace dawn_native {
static TextureBase* MakeError(DeviceBase* device);
- dawn::TextureDimension GetDimension() const;
+ wgpu::TextureDimension GetDimension() const;
const Format& GetFormat() const;
const Extent3D& GetSize() const;
uint32_t GetArrayLayers() const;
uint32_t GetNumMipLevels() const;
uint32_t GetSampleCount() const;
- dawn::TextureUsage GetUsage() const;
+ wgpu::TextureUsage GetUsage() const;
TextureState GetTextureState() const;
uint32_t GetSubresourceIndex(uint32_t mipLevel, uint32_t arraySlice) const;
bool IsSubresourceContentInitialized(uint32_t baseMipLevel,
@@ -92,14 +92,14 @@ namespace dawn_native {
virtual void DestroyImpl();
MaybeError ValidateDestroy() const;
- dawn::TextureDimension mDimension;
+ wgpu::TextureDimension mDimension;
// TODO(cwallez@chromium.org): This should be deduplicated in the Device
const Format& mFormat;
Extent3D mSize;
uint32_t mArrayLayerCount;
uint32_t mMipLevelCount;
uint32_t mSampleCount;
- dawn::TextureUsage mUsage = dawn::TextureUsage::None;
+ wgpu::TextureUsage mUsage = wgpu::TextureUsage::None;
TextureState mState;
// TODO(natlee@microsoft.com): Use a more optimized data structure to save space
@@ -116,7 +116,7 @@ namespace dawn_native {
TextureBase* GetTexture();
const Format& GetFormat() const;
- dawn::TextureViewDimension GetDimension() const;
+ wgpu::TextureViewDimension GetDimension() const;
uint32_t GetBaseMipLevel() const;
uint32_t GetLevelCount() const;
uint32_t GetBaseArrayLayer() const;
@@ -129,7 +129,7 @@ namespace dawn_native {
// TODO(cwallez@chromium.org): This should be deduplicated in the Device
const Format& mFormat;
- dawn::TextureViewDimension mDimension;
+ wgpu::TextureViewDimension mDimension;
uint32_t mBaseMipLevel;
uint32_t mMipLevelCount;
uint32_t mBaseArrayLayer;
diff --git a/chromium/third_party/dawn/src/dawn_native/Toggles.cpp b/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
index 7cc6f355c1d..8f5247fdf33 100644
--- a/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
@@ -70,7 +70,27 @@ namespace dawn_native {
"workaround is enabled by default on all Vulkan drivers to solve an issue in the "
"Vulkan SPEC about the texture-to-texture copies with compressed formats. See #1005 "
"(https://github.com/KhronosGroup/Vulkan-Docs/issues/1005) for more details.",
- "https://bugs.chromium.org/p/dawn/issues/detail?id=42"}}}};
+ "https://bugs.chromium.org/p/dawn/issues/detail?id=42"}},
+ {Toggle::UseD3D12ResourceHeapTier2,
+ {"use_d3d12_resource_heap_tier2",
+ "Enable support for resource heap tier 2. Resource heap tier 2 allows mixing of "
+ "texture and buffers in the same heap. This allows better heap re-use and reduces "
+ "fragmentation."}},
+ {Toggle::UseD3D12RenderPass,
+ {"use_d3d12_render_pass",
+ "Use the D3D12 render pass API introduced in Windows build 1809 by default. On "
+ "versions of Windows prior to build 1809, or when this toggle is turned off, Dawn "
+ "will emulate a render pass."}},
+ {Toggle::SkipValidation,
+ {"skip_validation", "Skip expensive validation of Dawn commands."}},
+ {Toggle::UseSpvc,
+ {"use_spvc",
+ "Enable use of spvc for shader compilation, instead of accessing spirv_cross "
+ "directly."}},
+ {Toggle::UseSpvcIRGen,
+ {"use_spvc_ir_gen",
+ "Enable usage of spvc's internal parsing and IR generation code, instead of "
+ "spirv_cross's."}}}};
} // anonymous namespace
diff --git a/chromium/third_party/dawn/src/dawn_native/Toggles.h b/chromium/third_party/dawn/src/dawn_native/Toggles.h
index d5265946dc1..aa5c4f9bcd9 100644
--- a/chromium/third_party/dawn/src/dawn_native/Toggles.h
+++ b/chromium/third_party/dawn/src/dawn_native/Toggles.h
@@ -30,6 +30,11 @@ namespace dawn_native {
LazyClearResourceOnFirstUse,
TurnOffVsync,
UseTemporaryBufferInCompressedTextureToTextureCopy,
+ UseD3D12ResourceHeapTier2,
+ UseD3D12RenderPass,
+ SkipValidation,
+ UseSpvc,
+ UseSpvcIRGen,
EnumCount,
InvalidEnum = EnumCount,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
index 0d9292b3bc8..38ca3a4c975 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
@@ -49,7 +49,7 @@ namespace dawn_native { namespace d3d12 {
}
switch (layout.types[bindingIndex]) {
- case dawn::BindingType::UniformBuffer: {
+ case wgpu::BindingType::UniformBuffer: {
BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
D3D12_CONSTANT_BUFFER_VIEW_DESC desc;
@@ -62,7 +62,7 @@ namespace dawn_native { namespace d3d12 {
&desc, cbvUavSrvHeapStart.GetCPUHandle(*cbvUavSrvHeapOffset +
bindingOffsets[bindingIndex]));
} break;
- case dawn::BindingType::StorageBuffer: {
+ case wgpu::BindingType::StorageBuffer: {
BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
// Since SPIRV-Cross outputs HLSL shaders with RWByteAddressBuffer,
@@ -86,7 +86,7 @@ namespace dawn_native { namespace d3d12 {
cbvUavSrvHeapStart.GetCPUHandle(*cbvUavSrvHeapOffset +
bindingOffsets[bindingIndex]));
} break;
- case dawn::BindingType::SampledTexture: {
+ case wgpu::BindingType::SampledTexture: {
auto* view = ToBackend(GetBindingAsTextureView(bindingIndex));
auto& srv = view->GetSRVDescriptor();
d3d12Device->CreateShaderResourceView(
@@ -94,7 +94,7 @@ namespace dawn_native { namespace d3d12 {
cbvUavSrvHeapStart.GetCPUHandle(*cbvUavSrvHeapOffset +
bindingOffsets[bindingIndex]));
} break;
- case dawn::BindingType::Sampler: {
+ case wgpu::BindingType::Sampler: {
auto* sampler = ToBackend(GetBindingAsSampler(bindingIndex));
auto& samplerDesc = sampler->GetSamplerDescriptor();
d3d12Device->CreateSampler(
@@ -102,8 +102,8 @@ namespace dawn_native { namespace d3d12 {
bindingOffsets[bindingIndex]));
} break;
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
UNREACHABLE();
break;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
index e0627b4be30..b8107108425 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
@@ -32,21 +32,21 @@ namespace dawn_native { namespace d3d12 {
}
switch (groupInfo.types[binding]) {
- case dawn::BindingType::UniformBuffer:
+ case wgpu::BindingType::UniformBuffer:
mBindingOffsets[binding] = mDescriptorCounts[CBV]++;
break;
- case dawn::BindingType::StorageBuffer:
+ case wgpu::BindingType::StorageBuffer:
mBindingOffsets[binding] = mDescriptorCounts[UAV]++;
break;
- case dawn::BindingType::SampledTexture:
+ case wgpu::BindingType::SampledTexture:
mBindingOffsets[binding] = mDescriptorCounts[SRV]++;
break;
- case dawn::BindingType::Sampler:
+ case wgpu::BindingType::Sampler:
mBindingOffsets[binding] = mDescriptorCounts[Sampler]++;
break;
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
UNREACHABLE();
break;
}
@@ -99,14 +99,14 @@ namespace dawn_native { namespace d3d12 {
// Root descriptor needs to set this value to set correct register number in
// generated HLSL shader.
switch (groupInfo.types[binding]) {
- case dawn::BindingType::UniformBuffer:
- case dawn::BindingType::StorageBuffer:
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::StorageBuffer:
mBindingOffsets[binding] = baseRegister++;
break;
- case dawn::BindingType::SampledTexture:
- case dawn::BindingType::Sampler:
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::SampledTexture:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
UNREACHABLE();
break;
}
@@ -114,21 +114,21 @@ namespace dawn_native { namespace d3d12 {
}
switch (groupInfo.types[binding]) {
- case dawn::BindingType::UniformBuffer:
+ case wgpu::BindingType::UniformBuffer:
mBindingOffsets[binding] += descriptorOffsets[CBV];
break;
- case dawn::BindingType::StorageBuffer:
+ case wgpu::BindingType::StorageBuffer:
mBindingOffsets[binding] += descriptorOffsets[UAV];
break;
- case dawn::BindingType::SampledTexture:
+ case wgpu::BindingType::SampledTexture:
mBindingOffsets[binding] += descriptorOffsets[SRV];
break;
- case dawn::BindingType::Sampler:
+ case wgpu::BindingType::Sampler:
mBindingOffsets[binding] += descriptorOffsets[Sampler];
break;
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
UNREACHABLE();
break;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
index a0a208305f3..b875403bf9a 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
@@ -24,45 +24,45 @@
namespace dawn_native { namespace d3d12 {
namespace {
- D3D12_RESOURCE_FLAGS D3D12ResourceFlags(dawn::BufferUsage usage) {
+ D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::BufferUsage usage) {
D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
- if (usage & dawn::BufferUsage::Storage) {
+ if (usage & wgpu::BufferUsage::Storage) {
flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
}
return flags;
}
- D3D12_RESOURCE_STATES D3D12BufferUsage(dawn::BufferUsage usage) {
+ D3D12_RESOURCE_STATES D3D12BufferUsage(wgpu::BufferUsage usage) {
D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
- if (usage & dawn::BufferUsage::CopySrc) {
+ if (usage & wgpu::BufferUsage::CopySrc) {
resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
}
- if (usage & dawn::BufferUsage::CopyDst) {
+ if (usage & wgpu::BufferUsage::CopyDst) {
resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
}
- if (usage & (dawn::BufferUsage::Vertex | dawn::BufferUsage::Uniform)) {
+ if (usage & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform)) {
resourceState |= D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER;
}
- if (usage & dawn::BufferUsage::Index) {
+ if (usage & wgpu::BufferUsage::Index) {
resourceState |= D3D12_RESOURCE_STATE_INDEX_BUFFER;
}
- if (usage & dawn::BufferUsage::Storage) {
+ if (usage & wgpu::BufferUsage::Storage) {
resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
}
- if (usage & dawn::BufferUsage::Indirect) {
+ if (usage & wgpu::BufferUsage::Indirect) {
resourceState |= D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT;
}
return resourceState;
}
- D3D12_HEAP_TYPE D3D12HeapType(dawn::BufferUsage allowedUsage) {
- if (allowedUsage & dawn::BufferUsage::MapRead) {
+ D3D12_HEAP_TYPE D3D12HeapType(wgpu::BufferUsage allowedUsage) {
+ if (allowedUsage & wgpu::BufferUsage::MapRead) {
return D3D12_HEAP_TYPE_READBACK;
- } else if (allowedUsage & dawn::BufferUsage::MapWrite) {
+ } else if (allowedUsage & wgpu::BufferUsage::MapWrite) {
return D3D12_HEAP_TYPE_UPLOAD;
} else {
return D3D12_HEAP_TYPE_DEFAULT;
@@ -88,7 +88,7 @@ namespace dawn_native { namespace d3d12 {
resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
// Add CopyDst for non-mappable buffer initialization in CreateBufferMapped
// and robust resource initialization.
- resourceDescriptor.Flags = D3D12ResourceFlags(GetUsage() | dawn::BufferUsage::CopyDst);
+ resourceDescriptor.Flags = D3D12ResourceFlags(GetUsage() | wgpu::BufferUsage::CopyDst);
auto heapType = D3D12HeapType(GetUsage());
auto bufferUsage = D3D12_RESOURCE_STATE_COMMON;
@@ -98,7 +98,7 @@ namespace dawn_native { namespace d3d12 {
if (heapType == D3D12_HEAP_TYPE_READBACK) {
bufferUsage |= D3D12_RESOURCE_STATE_COPY_DEST;
mFixedResourceState = true;
- mLastUsage = dawn::BufferUsage::CopyDst;
+ mLastUsage = wgpu::BufferUsage::CopyDst;
}
// D3D12 requires buffers on the UPLOAD heap to have the D3D12_RESOURCE_STATE_GENERIC_READ
@@ -106,13 +106,12 @@ namespace dawn_native { namespace d3d12 {
if (heapType == D3D12_HEAP_TYPE_UPLOAD) {
bufferUsage |= D3D12_RESOURCE_STATE_GENERIC_READ;
mFixedResourceState = true;
- mLastUsage = dawn::BufferUsage::CopySrc;
+ mLastUsage = wgpu::BufferUsage::CopySrc;
}
DAWN_TRY_ASSIGN(
mResourceAllocation,
- ToBackend(GetDevice())
- ->AllocateMemory(heapType, resourceDescriptor, bufferUsage, D3D12_HEAP_FLAG_NONE));
+ ToBackend(GetDevice())->AllocateMemory(heapType, resourceDescriptor, bufferUsage));
return {};
}
@@ -134,21 +133,35 @@ namespace dawn_native { namespace d3d12 {
// cause subsequent errors.
bool Buffer::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
D3D12_RESOURCE_BARRIER* barrier,
- dawn::BufferUsage newUsage) {
+ wgpu::BufferUsage newUsage) {
// Resources in upload and readback heaps must be kept in the COPY_SOURCE/DEST state
if (mFixedResourceState) {
ASSERT(mLastUsage == newUsage);
return false;
}
+ D3D12_RESOURCE_STATES lastState = D3D12BufferUsage(mLastUsage);
+ D3D12_RESOURCE_STATES newState = D3D12BufferUsage(newUsage);
+
+ // If the transition is from-UAV-to-UAV, then a UAV barrier is needed.
+ // If one of the usages isn't UAV, then other barriers are used.
+ bool needsUAVBarrier = lastState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS &&
+ newState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
+
+ if (needsUAVBarrier) {
+ barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_UAV;
+ barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
+ barrier->UAV.pResource = GetD3D12Resource().Get();
+
+ mLastUsage = newUsage;
+ return true;
+ }
+
// We can skip transitions to already current usages.
- // TODO(cwallez@chromium.org): Need some form of UAV barriers at some point.
if ((mLastUsage & newUsage) == newUsage) {
return false;
}
- D3D12_RESOURCE_STATES lastState = D3D12BufferUsage(mLastUsage);
- D3D12_RESOURCE_STATES newState = D3D12BufferUsage(newUsage);
mLastUsage = newUsage;
// The COMMON state represents a state where no write operations can be pending, which makes
@@ -191,7 +204,7 @@ namespace dawn_native { namespace d3d12 {
}
void Buffer::TransitionUsageNow(CommandRecordingContext* commandContext,
- dawn::BufferUsage usage) {
+ wgpu::BufferUsage usage) {
D3D12_RESOURCE_BARRIER barrier;
if (TransitionUsageAndGetResourceBarrier(commandContext, &barrier, usage)) {
@@ -205,15 +218,15 @@ namespace dawn_native { namespace d3d12 {
void Buffer::OnMapCommandSerialFinished(uint32_t mapSerial, void* data, bool isWrite) {
if (isWrite) {
- CallMapWriteCallback(mapSerial, DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS, data, GetSize());
+ CallMapWriteCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
} else {
- CallMapReadCallback(mapSerial, DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS, data, GetSize());
+ CallMapReadCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
}
}
bool Buffer::IsMapWritable() const {
// TODO(enga): Handle CPU-visible memory on UMA
- return (GetUsage() & (dawn::BufferUsage::MapRead | dawn::BufferUsage::MapWrite)) != 0;
+ return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
}
MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
index 289c4ecc8f1..6a5b9366a40 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
@@ -39,8 +39,8 @@ namespace dawn_native { namespace d3d12 {
void OnMapCommandSerialFinished(uint32_t mapSerial, void* data, bool isWrite);
bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
D3D12_RESOURCE_BARRIER* barrier,
- dawn::BufferUsage newUsage);
- void TransitionUsageNow(CommandRecordingContext* commandContext, dawn::BufferUsage usage);
+ wgpu::BufferUsage newUsage);
+ void TransitionUsageNow(CommandRecordingContext* commandContext, wgpu::BufferUsage usage);
private:
// Dawn API
@@ -54,7 +54,7 @@ namespace dawn_native { namespace d3d12 {
ResourceHeapAllocation mResourceAllocation;
bool mFixedResourceState = false;
- dawn::BufferUsage mLastUsage = dawn::BufferUsage::None;
+ wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
Serial mLastUsedSerial = UINT64_MAX;
D3D12_RANGE mWrittenMappedRange;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
index d095395f60c..1b8118ea6a2 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
@@ -15,7 +15,7 @@
#include "dawn_native/d3d12/CommandBufferD3D12.h"
#include "common/Assert.h"
-#include "dawn_native/BindGroupTracker.h"
+#include "dawn_native/BindGroupAndStorageBarrierTracker.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/Commands.h"
#include "dawn_native/RenderBundle.h"
@@ -28,8 +28,8 @@
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
#include "dawn_native/d3d12/PlatformFunctions.h"
+#include "dawn_native/d3d12/RenderPassBuilderD3D12.h"
#include "dawn_native/d3d12/RenderPipelineD3D12.h"
-#include "dawn_native/d3d12/ResourceAllocator.h"
#include "dawn_native/d3d12/SamplerD3D12.h"
#include "dawn_native/d3d12/TextureCopySplitter.h"
#include "dawn_native/d3d12/TextureD3D12.h"
@@ -41,11 +41,11 @@ namespace dawn_native { namespace d3d12 {
namespace {
- DXGI_FORMAT DXGIIndexFormat(dawn::IndexFormat format) {
+ DXGI_FORMAT DXGIIndexFormat(wgpu::IndexFormat format) {
switch (format) {
- case dawn::IndexFormat::Uint16:
+ case wgpu::IndexFormat::Uint16:
return DXGI_FORMAT_R16_UINT;
- case dawn::IndexFormat::Uint32:
+ case wgpu::IndexFormat::Uint32:
return DXGI_FORMAT_R32_UINT;
default:
UNREACHABLE();
@@ -66,17 +66,12 @@ namespace dawn_native { namespace d3d12 {
return false;
}
- struct OMSetRenderTargetArgs {
- unsigned int numRTVs = 0;
- std::array<D3D12_CPU_DESCRIPTOR_HANDLE, kMaxColorAttachments> RTVs = {};
- D3D12_CPU_DESCRIPTOR_HANDLE dsv = {};
- };
-
} // anonymous namespace
- class BindGroupStateTracker : public BindGroupTrackerBase<BindGroup*, false> {
+ class BindGroupStateTracker : public BindGroupAndStorageBarrierTrackerBase<false, uint64_t> {
public:
- BindGroupStateTracker(Device* device) : BindGroupTrackerBase(), mDevice(device) {
+ BindGroupStateTracker(Device* device)
+ : BindGroupAndStorageBarrierTrackerBase(), mDevice(device) {
}
void SetInComputePass(bool inCompute_) {
@@ -137,10 +132,41 @@ namespace dawn_native { namespace d3d12 {
}
}
- void Apply(ID3D12GraphicsCommandList* commandList) {
+ void Apply(CommandRecordingContext* commandContext) {
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
for (uint32_t index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
- ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index, mBindGroups[index],
- mDynamicOffsetCounts[index], mDynamicOffsets[index].data());
+ ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index,
+ ToBackend(mBindGroups[index]), mDynamicOffsetCounts[index],
+ mDynamicOffsets[index].data());
+ }
+
+ if (mInCompute) {
+ for (uint32_t index : IterateBitSet(mBindGroupLayoutsMask)) {
+ for (uint32_t binding : IterateBitSet(mBuffersNeedingBarrier[index])) {
+ wgpu::BindingType bindingType = mBindingTypes[index][binding];
+ switch (bindingType) {
+ case wgpu::BindingType::StorageBuffer:
+ ToBackend(mBuffers[index][binding])
+ ->TransitionUsageNow(commandContext,
+ wgpu::BufferUsage::Storage);
+ break;
+
+ case wgpu::BindingType::StorageTexture:
+ // Not implemented.
+
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::SampledTexture:
+ // Don't require barriers.
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
}
DidApply();
}
@@ -193,7 +219,7 @@ namespace dawn_native { namespace d3d12 {
ToBackend(binding.buffer)->GetVA() + offset;
switch (layout.types[bindingIndex]) {
- case dawn::BindingType::UniformBuffer:
+ case wgpu::BindingType::UniformBuffer:
if (mInCompute) {
commandList->SetComputeRootConstantBufferView(parameterIndex,
bufferLocation);
@@ -202,7 +228,7 @@ namespace dawn_native { namespace d3d12 {
bufferLocation);
}
break;
- case dawn::BindingType::StorageBuffer:
+ case wgpu::BindingType::StorageBuffer:
if (mInCompute) {
commandList->SetComputeRootUnorderedAccessView(parameterIndex,
bufferLocation);
@@ -211,10 +237,10 @@ namespace dawn_native { namespace d3d12 {
bufferLocation);
}
break;
- case dawn::BindingType::SampledTexture:
- case dawn::BindingType::Sampler:
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::SampledTexture:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
UNREACHABLE();
break;
}
@@ -366,29 +392,30 @@ namespace dawn_native { namespace d3d12 {
auto* d3d12BufferView = &mD3D12BufferViews[slot];
d3d12BufferView->BufferLocation = buffer->GetVA() + offset;
d3d12BufferView->SizeInBytes = buffer->GetSize() - offset;
- // The bufferView stride is set based on the input state before a draw.
+ // The bufferView stride is set based on the vertex state before a draw.
}
void Apply(ID3D12GraphicsCommandList* commandList,
const RenderPipeline* renderPipeline) {
ASSERT(renderPipeline != nullptr);
- std::bitset<kMaxVertexBuffers> inputsMask = renderPipeline->GetInputsSetMask();
+ std::bitset<kMaxVertexBuffers> vertexBufferSlotsUsed =
+ renderPipeline->GetVertexBufferSlotsUsed();
uint32_t startSlot = mStartSlot;
uint32_t endSlot = mEndSlot;
- // If the input state has changed, we need to update the StrideInBytes
+ // If the vertex state has changed, we need to update the StrideInBytes
// for the D3D12 buffer views. We also need to extend the dirty range to
// touch all these slots because the stride may have changed.
if (mLastAppliedRenderPipeline != renderPipeline) {
mLastAppliedRenderPipeline = renderPipeline;
- for (uint32_t slot : IterateBitSet(inputsMask)) {
+ for (uint32_t slot : IterateBitSet(vertexBufferSlotsUsed)) {
startSlot = std::min(startSlot, slot);
endSlot = std::max(endSlot, slot + 1);
mD3D12BufferViews[slot].StrideInBytes =
- renderPipeline->GetInput(slot).stride;
+ renderPipeline->GetVertexBuffer(slot).arrayStride;
}
}
@@ -431,7 +458,7 @@ namespace dawn_native { namespace d3d12 {
void OnSetPipeline(const RenderPipelineBase* pipeline) {
mD3D12BufferView.Format =
- DXGIIndexFormat(pipeline->GetVertexInputDescriptor()->indexFormat);
+ DXGIIndexFormat(pipeline->GetVertexStateDescriptor()->indexFormat);
}
void Apply(ID3D12GraphicsCommandList* commandList) {
@@ -462,7 +489,7 @@ namespace dawn_native { namespace d3d12 {
SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
BindGroup* group = ToBackend(cmd->group.Get());
if (cmd->dynamicOffsetCount) {
- commands->NextData<uint64_t>(cmd->dynamicOffsetCount);
+ commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
bindingTracker->TrackSetBindGroup(group, cmd->index, indexInSubmit);
} break;
@@ -539,8 +566,7 @@ namespace dawn_native { namespace d3d12 {
} // anonymous namespace
- CommandBuffer::CommandBuffer(CommandEncoderBase* encoder,
- const CommandBufferDescriptor* descriptor)
+ CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
: CommandBufferBase(encoder, descriptor), mCommands(encoder->AcquireCommands()) {
}
@@ -569,11 +595,13 @@ namespace dawn_native { namespace d3d12 {
// Records the necessary barriers for the resource usage pre-computed by the frontend
auto TransitionForPass = [](CommandRecordingContext* commandContext,
- const PassResourceUsage& usages) {
+ const PassResourceUsage& usages) -> bool {
std::vector<D3D12_RESOURCE_BARRIER> barriers;
ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+ wgpu::BufferUsage bufferUsages = wgpu::BufferUsage::None;
+
for (size_t i = 0; i < usages.buffers.size(); ++i) {
D3D12_RESOURCE_BARRIER barrier;
if (ToBackend(usages.buffers[i])
@@ -581,6 +609,7 @@ namespace dawn_native { namespace d3d12 {
usages.bufferUsages[i])) {
barriers.push_back(barrier);
}
+ bufferUsages |= usages.bufferUsages[i];
}
for (size_t i = 0; i < usages.textures.size(); ++i) {
@@ -588,13 +617,15 @@ namespace dawn_native { namespace d3d12 {
// Clear textures that are not output attachments. Output attachments will be
// cleared during record render pass if the texture subresource has not been
// initialized before the render pass.
- if (!(usages.textureUsages[i] & dawn::TextureUsage::OutputAttachment)) {
+ if (!(usages.textureUsages[i] & wgpu::TextureUsage::OutputAttachment)) {
texture->EnsureSubresourceContentInitialized(commandContext, 0,
texture->GetNumMipLevels(), 0,
texture->GetArrayLayers());
}
}
+ wgpu::TextureUsage textureUsages = wgpu::TextureUsage::None;
+
for (size_t i = 0; i < usages.textures.size(); ++i) {
D3D12_RESOURCE_BARRIER barrier;
if (ToBackend(usages.textures[i])
@@ -602,11 +633,15 @@ namespace dawn_native { namespace d3d12 {
usages.textureUsages[i])) {
barriers.push_back(barrier);
}
+ textureUsages |= usages.textureUsages[i];
}
if (barriers.size()) {
commandList->ResourceBarrier(barriers.size(), barriers.data());
}
+
+ return (bufferUsages & wgpu::BufferUsage::Storage ||
+ textureUsages & wgpu::TextureUsage::Storage);
};
const std::vector<PassResourceUsage>& passResourceUsages = GetResourceUsages().perPass;
@@ -620,7 +655,7 @@ namespace dawn_native { namespace d3d12 {
TransitionForPass(commandContext, passResourceUsages[nextPassNumber]);
bindingTracker.SetInComputePass(true);
- RecordComputePass(commandList, &bindingTracker);
+ RecordComputePass(commandContext, &bindingTracker);
nextPassNumber++;
} break;
@@ -629,10 +664,11 @@ namespace dawn_native { namespace d3d12 {
BeginRenderPassCmd* beginRenderPassCmd =
mCommands.NextCommand<BeginRenderPassCmd>();
- TransitionForPass(commandContext, passResourceUsages[nextPassNumber]);
+ const bool passHasUAV =
+ TransitionForPass(commandContext, passResourceUsages[nextPassNumber]);
bindingTracker.SetInComputePass(false);
RecordRenderPass(commandContext, &bindingTracker, &renderPassTracker,
- beginRenderPassCmd);
+ beginRenderPassCmd, passHasUAV);
nextPassNumber++;
} break;
@@ -642,8 +678,8 @@ namespace dawn_native { namespace d3d12 {
Buffer* srcBuffer = ToBackend(copy->source.Get());
Buffer* dstBuffer = ToBackend(copy->destination.Get());
- srcBuffer->TransitionUsageNow(commandContext, dawn::BufferUsage::CopySrc);
- dstBuffer->TransitionUsageNow(commandContext, dawn::BufferUsage::CopyDst);
+ srcBuffer->TransitionUsageNow(commandContext, wgpu::BufferUsage::CopySrc);
+ dstBuffer->TransitionUsageNow(commandContext, wgpu::BufferUsage::CopyDst);
commandList->CopyBufferRegion(
dstBuffer->GetD3D12Resource().Get(), copy->destinationOffset,
@@ -665,8 +701,8 @@ namespace dawn_native { namespace d3d12 {
copy->destination.arrayLayer, 1);
}
- buffer->TransitionUsageNow(commandContext, dawn::BufferUsage::CopySrc);
- texture->TransitionUsageNow(commandContext, dawn::TextureUsage::CopyDst);
+ buffer->TransitionUsageNow(commandContext, wgpu::BufferUsage::CopySrc);
+ texture->TransitionUsageNow(commandContext, wgpu::TextureUsage::CopyDst);
auto copySplit = ComputeTextureCopySplit(
copy->destination.origin, copy->copySize, texture->GetFormat(),
@@ -700,8 +736,8 @@ namespace dawn_native { namespace d3d12 {
texture->EnsureSubresourceContentInitialized(
commandContext, copy->source.mipLevel, 1, copy->source.arrayLayer, 1);
- texture->TransitionUsageNow(commandContext, dawn::TextureUsage::CopySrc);
- buffer->TransitionUsageNow(commandContext, dawn::BufferUsage::CopyDst);
+ texture->TransitionUsageNow(commandContext, wgpu::TextureUsage::CopySrc);
+ buffer->TransitionUsageNow(commandContext, wgpu::BufferUsage::CopyDst);
TextureCopySplit copySplit = ComputeTextureCopySplit(
copy->source.origin, copy->copySize, texture->GetFormat(),
@@ -747,8 +783,8 @@ namespace dawn_native { namespace d3d12 {
commandContext, copy->destination.mipLevel, 1,
copy->destination.arrayLayer, 1);
}
- source->TransitionUsageNow(commandContext, dawn::TextureUsage::CopySrc);
- destination->TransitionUsageNow(commandContext, dawn::TextureUsage::CopyDst);
+ source->TransitionUsageNow(commandContext, wgpu::TextureUsage::CopySrc);
+ destination->TransitionUsageNow(commandContext, wgpu::TextureUsage::CopyDst);
if (CanUseCopyResource(source->GetNumMipLevels(), source->GetSize(),
destination->GetSize(), copy->copySize)) {
@@ -781,9 +817,10 @@ namespace dawn_native { namespace d3d12 {
return {};
}
- void CommandBuffer::RecordComputePass(ID3D12GraphicsCommandList* commandList,
+ void CommandBuffer::RecordComputePass(CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker) {
PipelineLayout* lastLayout = nullptr;
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
Command type;
while (mCommands.NextCommandId(&type)) {
@@ -791,14 +828,14 @@ namespace dawn_native { namespace d3d12 {
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
- bindingTracker->Apply(commandList);
+ bindingTracker->Apply(commandContext);
commandList->Dispatch(dispatch->x, dispatch->y, dispatch->z);
} break;
case Command::DispatchIndirect: {
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
- bindingTracker->Apply(commandList);
+ bindingTracker->Apply(commandContext);
Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
ComPtr<ID3D12CommandSignature> signature =
ToBackend(GetDevice())->GetDispatchIndirectSignature();
@@ -828,10 +865,10 @@ namespace dawn_native { namespace d3d12 {
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
BindGroup* group = ToBackend(cmd->group.Get());
- uint64_t* dynamicOffsets = nullptr;
+ uint32_t* dynamicOffsets = nullptr;
if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = mCommands.NextData<uint64_t>(cmd->dynamicOffsetCount);
+ dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
}
bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
@@ -879,126 +916,194 @@ namespace dawn_native { namespace d3d12 {
}
}
- void CommandBuffer::RecordRenderPass(CommandRecordingContext* commandContext,
- BindGroupStateTracker* bindingTracker,
- RenderPassDescriptorHeapTracker* renderPassTracker,
- BeginRenderPassCmd* renderPass) {
- OMSetRenderTargetArgs args = renderPassTracker->GetSubpassOMSetRenderTargetArgs(renderPass);
- ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+ void CommandBuffer::SetupRenderPass(CommandRecordingContext* commandContext,
+ BeginRenderPassCmd* renderPass,
+ RenderPassBuilder* renderPassBuilder) {
+ for (uint32_t i : IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+ RenderPassColorAttachmentInfo& attachmentInfo = renderPass->colorAttachments[i];
+ TextureView* view = ToBackend(attachmentInfo.view.Get());
+ Texture* texture = ToBackend(view->GetTexture());
+
+ // Load operation is changed to clear when the texture is uninitialized.
+ if (!texture->IsSubresourceContentInitialized(view->GetBaseMipLevel(), 1,
+ view->GetBaseArrayLayer(), 1) &&
+ attachmentInfo.loadOp == wgpu::LoadOp::Load) {
+ attachmentInfo.loadOp = wgpu::LoadOp::Clear;
+ attachmentInfo.clearColor = {0.0f, 0.0f, 0.0f, 0.0f};
+ }
- // Clear framebuffer attachments as needed and transition to render target
- {
- for (uint32_t i :
- IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
- auto& attachmentInfo = renderPass->colorAttachments[i];
- TextureView* view = ToBackend(attachmentInfo.view.Get());
+ // Set color load operation.
+ renderPassBuilder->SetRenderTargetBeginningAccess(
+ i, attachmentInfo.loadOp, attachmentInfo.clearColor, view->GetD3D12Format());
- // Load op - color
- ASSERT(view->GetLevelCount() == 1);
- ASSERT(view->GetLayerCount() == 1);
- if (attachmentInfo.loadOp == dawn::LoadOp::Clear ||
- (attachmentInfo.loadOp == dawn::LoadOp::Load &&
- !view->GetTexture()->IsSubresourceContentInitialized(
- view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1))) {
- D3D12_CPU_DESCRIPTOR_HANDLE handle = args.RTVs[i];
- commandList->ClearRenderTargetView(handle, &attachmentInfo.clearColor.r, 0,
- nullptr);
- }
+ // Set color store operation.
+ if (attachmentInfo.resolveTarget.Get() != nullptr) {
+ TextureView* resolveDestinationView = ToBackend(attachmentInfo.resolveTarget.Get());
+ Texture* resolveDestinationTexture =
+ ToBackend(resolveDestinationView->GetTexture());
- TextureView* resolveView = ToBackend(attachmentInfo.resolveTarget.Get());
- if (resolveView != nullptr) {
- // We need to set the resolve target to initialized so that it does not get
- // cleared later in the pipeline. The texture will be resolved from the source
- // color attachment, which will be correctly initialized.
- ToBackend(resolveView->GetTexture())
- ->SetIsSubresourceContentInitialized(
- true, resolveView->GetBaseMipLevel(), resolveView->GetLevelCount(),
- resolveView->GetBaseArrayLayer(), resolveView->GetLayerCount());
- }
+ resolveDestinationTexture->TransitionUsageNow(commandContext,
+ D3D12_RESOURCE_STATE_RESOLVE_DEST);
- switch (attachmentInfo.storeOp) {
- case dawn::StoreOp::Store: {
- view->GetTexture()->SetIsSubresourceContentInitialized(
- true, view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
- } break;
+ // Mark resolve target as initialized to prevent clearing later.
+ resolveDestinationTexture->SetIsSubresourceContentInitialized(
+ true, resolveDestinationView->GetBaseMipLevel(), 1,
+ resolveDestinationView->GetBaseArrayLayer(), 1);
- case dawn::StoreOp::Clear: {
- view->GetTexture()->SetIsSubresourceContentInitialized(
- false, view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
- } break;
+ renderPassBuilder->SetRenderTargetEndingAccessResolve(i, attachmentInfo.storeOp,
+ view, resolveDestinationView);
+ } else {
+ renderPassBuilder->SetRenderTargetEndingAccess(i, attachmentInfo.storeOp);
+ }
+
+ // Set whether or not the texture requires initialization after the pass.
+ bool isInitialized = attachmentInfo.storeOp == wgpu::StoreOp::Store;
+ texture->SetIsSubresourceContentInitialized(isInitialized, view->GetBaseMipLevel(), 1,
+ view->GetBaseArrayLayer(), 1);
+ }
- default: { UNREACHABLE(); } break;
+ if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+ RenderPassDepthStencilAttachmentInfo& attachmentInfo =
+ renderPass->depthStencilAttachment;
+ TextureView* view = ToBackend(renderPass->depthStencilAttachment.view.Get());
+ Texture* texture = ToBackend(view->GetTexture());
+
+ const bool hasDepth = view->GetTexture()->GetFormat().HasDepth();
+ const bool hasStencil = view->GetTexture()->GetFormat().HasStencil();
+
+ // Load operations are changed to clear when the texture is uninitialized.
+ if (!view->GetTexture()->IsSubresourceContentInitialized(
+ view->GetBaseMipLevel(), view->GetLevelCount(), view->GetBaseArrayLayer(),
+ view->GetLayerCount())) {
+ if (hasDepth && attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
+ attachmentInfo.clearDepth = 0.0f;
+ attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+ }
+ if (hasStencil && attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
+ attachmentInfo.clearStencil = 0u;
+ attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
}
}
- if (renderPass->attachmentState->HasDepthStencilAttachment()) {
- auto& attachmentInfo = renderPass->depthStencilAttachment;
- Texture* texture = ToBackend(renderPass->depthStencilAttachment.view->GetTexture());
- TextureView* view = ToBackend(attachmentInfo.view.Get());
- float clearDepth = attachmentInfo.clearDepth;
- // TODO(kainino@chromium.org): investigate: should the Dawn clear
- // stencil type be uint8_t?
- uint8_t clearStencil = static_cast<uint8_t>(attachmentInfo.clearStencil);
+ // Set depth/stencil load operations.
+ if (hasDepth) {
+ renderPassBuilder->SetDepthAccess(
+ attachmentInfo.depthLoadOp, attachmentInfo.depthStoreOp,
+ attachmentInfo.clearDepth, view->GetD3D12Format());
+ } else {
+ renderPassBuilder->SetDepthNoAccess();
+ }
+
+ if (hasStencil) {
+ renderPassBuilder->SetStencilAccess(
+ attachmentInfo.stencilLoadOp, attachmentInfo.stencilStoreOp,
+ attachmentInfo.clearStencil, view->GetD3D12Format());
+ } else {
+ renderPassBuilder->SetStencilNoAccess();
+ }
- // Load op - depth/stencil
- bool doDepthClear = texture->GetFormat().HasDepth() &&
- (attachmentInfo.depthLoadOp == dawn::LoadOp::Clear);
- bool doStencilClear = texture->GetFormat().HasStencil() &&
- (attachmentInfo.stencilLoadOp == dawn::LoadOp::Clear);
+ // Set whether or not the texture requires initialization.
+ ASSERT(!hasDepth || !hasStencil ||
+ attachmentInfo.depthStoreOp == attachmentInfo.stencilStoreOp);
+ bool isInitialized = attachmentInfo.depthStoreOp == wgpu::StoreOp::Store;
+ texture->SetIsSubresourceContentInitialized(isInitialized, view->GetBaseMipLevel(), 1,
+ view->GetBaseArrayLayer(), 1);
+ } else {
+ renderPassBuilder->SetDepthStencilNoAccess();
+ }
+ }
+ void CommandBuffer::EmulateBeginRenderPass(CommandRecordingContext* commandContext,
+ const RenderPassBuilder* renderPassBuilder) const {
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+ // Clear framebuffer attachments as needed.
+ {
+ for (uint32_t i = 0; i < renderPassBuilder->GetColorAttachmentCount(); i++) {
+ // Load op - color
+ if (renderPassBuilder->GetRenderPassRenderTargetDescriptors()[i]
+ .BeginningAccess.Type == D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
+ commandList->ClearRenderTargetView(
+ renderPassBuilder->GetRenderPassRenderTargetDescriptors()[i].cpuDescriptor,
+ renderPassBuilder->GetRenderPassRenderTargetDescriptors()[i]
+ .BeginningAccess.Clear.ClearValue.Color,
+ 0, nullptr);
+ }
+ }
+
+ if (renderPassBuilder->HasDepth()) {
D3D12_CLEAR_FLAGS clearFlags = {};
- if (doDepthClear) {
+ float depthClear = 0.0f;
+ uint8_t stencilClear = 0u;
+
+ if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+ ->DepthBeginningAccess.Type ==
+ D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
+ depthClear = renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+ ->DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth;
}
- if (doStencilClear) {
+ if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+ ->StencilBeginningAccess.Type ==
+ D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
- }
- // If the depth stencil texture has not been initialized, we want to use loadop
- // clear to init the contents to 0's
- if (!texture->IsSubresourceContentInitialized(
- view->GetBaseMipLevel(), view->GetLevelCount(), view->GetBaseArrayLayer(),
- view->GetLayerCount())) {
- if (texture->GetFormat().HasDepth() &&
- attachmentInfo.depthLoadOp == dawn::LoadOp::Load) {
- clearDepth = 0.0f;
- clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
- }
- if (texture->GetFormat().HasStencil() &&
- attachmentInfo.stencilLoadOp == dawn::LoadOp::Load) {
- clearStencil = 0u;
- clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
- }
+ stencilClear =
+ renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+ ->StencilBeginningAccess.Clear.ClearValue.DepthStencil.Stencil;
}
+ // TODO(kainino@chromium.org): investigate: should the Dawn clear
+ // stencil type be uint8_t?
if (clearFlags) {
- D3D12_CPU_DESCRIPTOR_HANDLE handle = args.dsv;
- commandList->ClearDepthStencilView(handle, clearFlags, clearDepth, clearStencil,
- 0, nullptr);
- }
-
- if (attachmentInfo.depthStoreOp == dawn::StoreOp::Store &&
- attachmentInfo.stencilStoreOp == dawn::StoreOp::Store) {
- texture->SetIsSubresourceContentInitialized(
- true, view->GetBaseMipLevel(), view->GetLevelCount(),
- view->GetBaseArrayLayer(), view->GetLayerCount());
- } else if (attachmentInfo.depthStoreOp == dawn::StoreOp::Clear &&
- attachmentInfo.stencilStoreOp == dawn::StoreOp::Clear) {
- texture->SetIsSubresourceContentInitialized(
- false, view->GetBaseMipLevel(), view->GetLevelCount(),
- view->GetBaseArrayLayer(), view->GetLayerCount());
+ commandList->ClearDepthStencilView(
+ renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor,
+ clearFlags, depthClear, stencilClear, 0, nullptr);
}
}
}
- // Set up render targets
- {
- if (args.dsv.ptr) {
- commandList->OMSetRenderTargets(args.numRTVs, args.RTVs.data(), FALSE, &args.dsv);
- } else {
- commandList->OMSetRenderTargets(args.numRTVs, args.RTVs.data(), FALSE, nullptr);
- }
+ commandList->OMSetRenderTargets(
+ renderPassBuilder->GetColorAttachmentCount(), renderPassBuilder->GetRenderTargetViews(),
+ FALSE,
+ renderPassBuilder->HasDepth()
+ ? &renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor
+ : nullptr);
+ }
+
+ void CommandBuffer::RecordRenderPass(
+ CommandRecordingContext* commandContext,
+ BindGroupStateTracker* bindingTracker,
+ RenderPassDescriptorHeapTracker* renderPassDescriptorHeapTracker,
+ BeginRenderPassCmd* renderPass,
+ const bool passHasUAV) {
+ OMSetRenderTargetArgs args =
+ renderPassDescriptorHeapTracker->GetSubpassOMSetRenderTargetArgs(renderPass);
+
+ const bool useRenderPass = GetDevice()->IsToggleEnabled(Toggle::UseD3D12RenderPass);
+
+ // renderPassBuilder must be scoped to RecordRenderPass because any underlying
+ // D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS structs must remain
+ // valid until after EndRenderPass() has been called.
+ RenderPassBuilder renderPassBuilder(args, passHasUAV);
+
+ SetupRenderPass(commandContext, renderPass, &renderPassBuilder);
+
+ // Use D3D12's native render pass API if it's available, otherwise emulate the
+ // beginning and ending access operations.
+ if (useRenderPass) {
+ commandContext->GetCommandList4()->BeginRenderPass(
+ renderPassBuilder.GetColorAttachmentCount(),
+ renderPassBuilder.GetRenderPassRenderTargetDescriptors(),
+ renderPassBuilder.HasDepth()
+ ? renderPassBuilder.GetRenderPassDepthStencilDescriptor()
+ : nullptr,
+ renderPassBuilder.GetRenderPassFlags());
+ } else {
+ EmulateBeginRenderPass(commandContext, &renderPassBuilder);
}
+ ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
// Set up default dynamic state
{
uint32_t width = renderPass->width;
@@ -1023,7 +1128,7 @@ namespace dawn_native { namespace d3d12 {
case Command::Draw: {
DrawCmd* draw = iter->NextCommand<DrawCmd>();
- bindingTracker->Apply(commandList);
+ bindingTracker->Apply(commandContext);
vertexBufferTracker.Apply(commandList, lastPipeline);
commandList->DrawInstanced(draw->vertexCount, draw->instanceCount,
draw->firstVertex, draw->firstInstance);
@@ -1032,7 +1137,7 @@ namespace dawn_native { namespace d3d12 {
case Command::DrawIndexed: {
DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
- bindingTracker->Apply(commandList);
+ bindingTracker->Apply(commandContext);
indexBufferTracker.Apply(commandList);
vertexBufferTracker.Apply(commandList, lastPipeline);
commandList->DrawIndexedInstanced(draw->indexCount, draw->instanceCount,
@@ -1043,7 +1148,7 @@ namespace dawn_native { namespace d3d12 {
case Command::DrawIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- bindingTracker->Apply(commandList);
+ bindingTracker->Apply(commandContext);
vertexBufferTracker.Apply(commandList, lastPipeline);
Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
ComPtr<ID3D12CommandSignature> signature =
@@ -1056,7 +1161,7 @@ namespace dawn_native { namespace d3d12 {
case Command::DrawIndexedIndirect: {
DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
- bindingTracker->Apply(commandList);
+ bindingTracker->Apply(commandContext);
indexBufferTracker.Apply(commandList);
vertexBufferTracker.Apply(commandList, lastPipeline);
Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
@@ -1122,10 +1227,10 @@ namespace dawn_native { namespace d3d12 {
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
BindGroup* group = ToBackend(cmd->group.Get());
- uint64_t* dynamicOffsets = nullptr;
+ uint32_t* dynamicOffsets = nullptr;
if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = iter->NextData<uint64_t>(cmd->dynamicOffsetCount);
+ dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
@@ -1156,10 +1261,9 @@ namespace dawn_native { namespace d3d12 {
switch (type) {
case Command::EndRenderPass: {
mCommands.NextCommand<EndRenderPassCmd>();
-
- // TODO(brandon1.jones@intel.com): avoid calling this function and enable MSAA
- // resolve in D3D12 render pass on the platforms that support this feature.
- if (renderPass->attachmentState->GetSampleCount() > 1) {
+ if (useRenderPass) {
+ commandContext->GetCommandList4()->EndRenderPass();
+ } else if (renderPass->attachmentState->GetSampleCount() > 1) {
ResolveMultisampledRenderPass(commandContext, renderPass);
}
return;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
index ce7f451e256..d710d08da99 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
@@ -31,26 +31,39 @@ namespace dawn_native {
namespace dawn_native { namespace d3d12 {
+ struct OMSetRenderTargetArgs {
+ unsigned int numRTVs = 0;
+ std::array<D3D12_CPU_DESCRIPTOR_HANDLE, kMaxColorAttachments> RTVs = {};
+ D3D12_CPU_DESCRIPTOR_HANDLE dsv = {};
+ };
+
class BindGroupStateTracker;
class CommandRecordingContext;
class Device;
class RenderPassDescriptorHeapTracker;
+ class RenderPassBuilder;
class RenderPipeline;
class CommandBuffer : public CommandBufferBase {
public:
- CommandBuffer(CommandEncoderBase* encoder, const CommandBufferDescriptor* descriptor);
+ CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
~CommandBuffer();
MaybeError RecordCommands(CommandRecordingContext* commandContext, uint32_t indexInSubmit);
private:
- void RecordComputePass(ID3D12GraphicsCommandList* commandList,
+ void RecordComputePass(CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker);
void RecordRenderPass(CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker,
- RenderPassDescriptorHeapTracker* renderPassTracker,
- BeginRenderPassCmd* renderPass);
+ RenderPassDescriptorHeapTracker* renderPassDescriptorHeapTracker,
+ BeginRenderPassCmd* renderPass,
+ bool passHasUAV);
+ void SetupRenderPass(CommandRecordingContext* commandContext,
+ BeginRenderPassCmd* renderPass,
+ RenderPassBuilder* renderPassBuilder);
+ void EmulateBeginRenderPass(CommandRecordingContext* commandContext,
+ const RenderPassBuilder* renderPassBuilder) const;
CommandIterator mCommands;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
index 4d927b8322c..209009c101b 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.cpp
@@ -17,6 +17,11 @@
namespace dawn_native { namespace d3d12 {
+ void CommandRecordingContext::AddToSharedTextureList(Texture* texture) {
+ ASSERT(IsOpen());
+ mSharedTextures.insert(texture);
+ }
+
MaybeError CommandRecordingContext::Open(ID3D12Device* d3d12Device,
CommandAllocatorManager* commandAllocationManager) {
ASSERT(!IsOpen());
@@ -36,6 +41,9 @@ namespace dawn_native { namespace d3d12 {
nullptr, IID_PPV_ARGS(&d3d12GraphicsCommandList)),
"D3D12 creating direct command list"));
mD3d12CommandList = std::move(d3d12GraphicsCommandList);
+ // Store a cast to ID3D12GraphicsCommandList4. This is required to use the D3D12 render
+ // pass APIs introduced in Windows build 1809.
+ mD3d12CommandList.As(&mD3d12CommandList4);
}
mIsOpen = true;
@@ -43,16 +51,30 @@ namespace dawn_native { namespace d3d12 {
return {};
}
- ResultOrError<ID3D12GraphicsCommandList*> CommandRecordingContext::Close() {
- ASSERT(IsOpen());
- mIsOpen = false;
- MaybeError error =
- CheckHRESULT(mD3d12CommandList->Close(), "D3D12 closing pending command list");
- if (error.IsError()) {
- mD3d12CommandList.Reset();
- DAWN_TRY(std::move(error));
+ MaybeError CommandRecordingContext::ExecuteCommandList(ID3D12CommandQueue* d3d12CommandQueue) {
+ if (IsOpen()) {
+ // Shared textures must be transitioned to common state after the last usage in order
+ // for them to be used by other APIs like D3D11. We ensure this by transitioning to the
+ // common state right before command list submission. TransitionUsageNow itself ensures
+            // no unnecessary transitions happen if the resource is already in the common state.
+ for (Texture* texture : mSharedTextures) {
+ texture->TransitionUsageNow(this, D3D12_RESOURCE_STATE_COMMON);
+ }
+
+ MaybeError error =
+ CheckHRESULT(mD3d12CommandList->Close(), "D3D12 closing pending command list");
+ if (error.IsError()) {
+ Release();
+ DAWN_TRY(std::move(error));
+ }
+
+ ID3D12CommandList* d3d12CommandList = GetCommandList();
+ d3d12CommandQueue->ExecuteCommandLists(1, &d3d12CommandList);
+
+ mIsOpen = false;
+ mSharedTextures.clear();
}
- return mD3d12CommandList.Get();
+ return {};
}
ID3D12GraphicsCommandList* CommandRecordingContext::GetCommandList() const {
@@ -61,9 +83,19 @@ namespace dawn_native { namespace d3d12 {
return mD3d12CommandList.Get();
}
+ // This function will fail on Windows versions prior to 1809. Support must be queried through
+ // the device before calling.
+ ID3D12GraphicsCommandList4* CommandRecordingContext::GetCommandList4() const {
+ ASSERT(IsOpen());
+ ASSERT(mD3d12CommandList.Get() != nullptr);
+ return mD3d12CommandList4.Get();
+ }
+
void CommandRecordingContext::Release() {
mD3d12CommandList.Reset();
+ mD3d12CommandList4.Reset();
mIsOpen = false;
+ mSharedTextures.clear();
}
bool CommandRecordingContext::IsOpen() const {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h
index 544dae97354..d501d59692d 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandRecordingContext.h
@@ -15,23 +15,33 @@
#define DAWNNATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
#include "dawn_native/Error.h"
+#include "dawn_native/d3d12/TextureD3D12.h"
#include "dawn_native/d3d12/d3d12_platform.h"
+#include <set>
+
namespace dawn_native { namespace d3d12 {
class CommandAllocatorManager;
+ class Texture;
class CommandRecordingContext {
public:
+ void AddToSharedTextureList(Texture* texture);
MaybeError Open(ID3D12Device* d3d12Device,
CommandAllocatorManager* commandAllocationManager);
- ResultOrError<ID3D12GraphicsCommandList*> Close();
+
ID3D12GraphicsCommandList* GetCommandList() const;
+ ID3D12GraphicsCommandList4* GetCommandList4() const;
void Release();
bool IsOpen() const;
+ MaybeError ExecuteCommandList(ID3D12CommandQueue* d3d12CommandQueue);
+
private:
ComPtr<ID3D12GraphicsCommandList> mD3d12CommandList;
+ ComPtr<ID3D12GraphicsCommandList4> mD3d12CommandList4;
bool mIsOpen = false;
+ std::set<Texture*> mSharedTextures;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.cpp
deleted file mode 100644
index 9a55e690b2a..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/CommittedResourceAllocatorD3D12.h"
-#include "dawn_native/d3d12/DeviceD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- CommittedResourceAllocator::CommittedResourceAllocator(Device* device, D3D12_HEAP_TYPE heapType)
- : mDevice(device), mHeapType(heapType) {
- }
-
- ResultOrError<ResourceHeapAllocation> CommittedResourceAllocator::Allocate(
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage,
- D3D12_HEAP_FLAGS heapFlags) {
- D3D12_HEAP_PROPERTIES heapProperties;
- heapProperties.Type = mHeapType;
- heapProperties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
- heapProperties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
- heapProperties.CreationNodeMask = 0;
- heapProperties.VisibleNodeMask = 0;
-
- ComPtr<ID3D12Resource> committedResource;
- if (FAILED(mDevice->GetD3D12Device()->CreateCommittedResource(
- &heapProperties, heapFlags, &resourceDescriptor, initialUsage, nullptr,
- IID_PPV_ARGS(&committedResource)))) {
- return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate resource");
- }
-
- AllocationInfo info;
- info.mMethod = AllocationMethod::kDirect;
-
- return ResourceHeapAllocation{info,
- /*offset*/ 0, std::move(committedResource)};
- }
-
- void CommittedResourceAllocator::Deallocate(ResourceHeapAllocation& allocation) {
- mDevice->ReferenceUntilUnused(allocation.GetD3D12Resource());
- }
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.h
deleted file mode 100644
index 7bfb9d8b420..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommittedResourceAllocatorD3D12.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_D3D12_COMMITTEDRESOURCEALLOCATORD3D12_H_
-#define DAWNNATIVE_D3D12_COMMITTEDRESOURCEALLOCATORD3D12_H_
-
-#include "common/SerialQueue.h"
-#include "dawn_native/Error.h"
-#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
-#include "dawn_native/d3d12/d3d12_platform.h"
-
-namespace dawn_native { namespace d3d12 {
-
- class Device;
-
- // Wrapper to allocate D3D12 committed resource.
- // Committed resources are implicitly backed by a D3D12 heap.
- class CommittedResourceAllocator {
- public:
- CommittedResourceAllocator(Device* device, D3D12_HEAP_TYPE heapType);
- ~CommittedResourceAllocator() = default;
-
- ResultOrError<ResourceHeapAllocation> Allocate(
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage,
- D3D12_HEAP_FLAGS heapFlags);
- void Deallocate(ResourceHeapAllocation& allocation);
-
- private:
- Device* mDevice;
- D3D12_HEAP_TYPE mHeapType;
- };
-
-}} // namespace dawn_native::d3d12
-
-#endif // DAWNNATIVE_D3D12_COMMITTEDRESOURCEALLOCATORD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
index 893b5654378..f1b94914a40 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ComputePipelineD3D12.cpp
@@ -32,8 +32,8 @@ namespace dawn_native { namespace d3d12 {
// SPRIV-cross does matrix multiplication expecting row major matrices
compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
- const ShaderModule* module = ToBackend(descriptor->computeStage.module);
- const std::string& hlslSource = module->GetHLSLSource(ToBackend(GetLayout()));
+ ShaderModule* module = ToBackend(descriptor->computeStage.module);
+ const std::string hlslSource = module->GetHLSLSource(ToBackend(GetLayout()));
ComPtr<ID3DBlob> compiledShader;
ComPtr<ID3DBlob> errors;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
index 5d96593d81a..2db62da4610 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
@@ -20,38 +20,41 @@
#include "common/SwapChainUtils.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/NativeSwapChainImplD3D12.h"
+#include "dawn_native/d3d12/TextureD3D12.h"
namespace dawn_native { namespace d3d12 {
- ComPtr<ID3D12Device> GetD3D12Device(DawnDevice device) {
+ ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device) {
Device* backendDevice = reinterpret_cast<Device*>(device);
return backendDevice->GetD3D12Device();
}
- DawnSwapChainImplementation CreateNativeSwapChainImpl(DawnDevice device, HWND window) {
+ DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device, HWND window) {
Device* backendDevice = reinterpret_cast<Device*>(device);
DawnSwapChainImplementation impl;
impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, window));
- impl.textureUsage = DAWN_TEXTURE_USAGE_PRESENT;
+ impl.textureUsage = WGPUTextureUsage_Present;
return impl;
}
- DawnTextureFormat GetNativeSwapChainPreferredFormat(
+ WGPUTextureFormat GetNativeSwapChainPreferredFormat(
const DawnSwapChainImplementation* swapChain) {
NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
- return static_cast<DawnTextureFormat>(impl->GetPreferredFormat());
+ return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
}
- DawnTexture WrapSharedHandle(DawnDevice device,
- const DawnTextureDescriptor* descriptor,
- HANDLE sharedHandle) {
+ WGPUTexture WrapSharedHandle(WGPUDevice device,
+ const WGPUTextureDescriptor* descriptor,
+ HANDLE sharedHandle,
+ uint64_t acquireMutexKey) {
Device* backendDevice = reinterpret_cast<Device*>(device);
const TextureDescriptor* backendDescriptor =
reinterpret_cast<const TextureDescriptor*>(descriptor);
- TextureBase* texture = backendDevice->WrapSharedHandle(backendDescriptor, sharedHandle);
- return reinterpret_cast<DawnTexture>(texture);
+ TextureBase* texture =
+ backendDevice->WrapSharedHandle(backendDescriptor, sharedHandle, acquireMutexKey);
+ return reinterpret_cast<WGPUTexture>(texture);
}
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp
index 38bde463f15..2cd46273c56 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.cpp
@@ -26,4 +26,11 @@ namespace dawn_native { namespace d3d12 {
return DAWN_DEVICE_LOST_ERROR(message);
}
+ MaybeError CheckOutOfMemoryHRESULT(HRESULT result, const char* context) {
+ if (result == E_OUTOFMEMORY) {
+ return DAWN_OUT_OF_MEMORY_ERROR(context);
+ }
+ return CheckHRESULT(result, context);
+ }
+
}} // namespace dawn_native::d3d12 \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h
index b5f5eb73800..ed11a8c7c5d 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h
@@ -23,6 +23,9 @@ namespace dawn_native { namespace d3d12 {
// Returns a success only if result of HResult is success
MaybeError CheckHRESULT(HRESULT result, const char* context);
+ // Uses CheckRESULT but returns OOM specific error when recoverable.
+ MaybeError CheckOutOfMemoryHRESULT(HRESULT result, const char* context);
+
}} // namespace dawn_native::d3d12
#endif // DAWNNATIVE_D3D12_D3D12ERROR_H_ \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp
index 1431b34c400..de9bd0369d7 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp
@@ -16,7 +16,7 @@
#include "dawn_native/d3d12/AdapterD3D12.h"
#include "dawn_native/d3d12/BackendD3D12.h"
-
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/PlatformFunctions.h"
namespace dawn_native { namespace d3d12 {
@@ -24,19 +24,33 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
D3D12DeviceInfo info = {};
- // Gather info about device memory
- {
- // Newer builds replace D3D_FEATURE_DATA_ARCHITECTURE with
- // D3D_FEATURE_DATA_ARCHITECTURE1. However, D3D_FEATURE_DATA_ARCHITECTURE can be used
- // for backwards compat.
- // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ne-d3d12-d3d12_feature
- D3D12_FEATURE_DATA_ARCHITECTURE arch = {};
- if (FAILED(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_ARCHITECTURE, &arch,
- sizeof(arch)))) {
- return DAWN_DEVICE_LOST_ERROR("CheckFeatureSupport failed");
- }
-
- info.isUMA = arch.UMA;
+ // Newer builds replace D3D_FEATURE_DATA_ARCHITECTURE with
+ // D3D_FEATURE_DATA_ARCHITECTURE1. However, D3D_FEATURE_DATA_ARCHITECTURE can be used
+ // for backwards compat.
+ // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ne-d3d12-d3d12_feature
+ D3D12_FEATURE_DATA_ARCHITECTURE arch = {};
+ DAWN_TRY(CheckHRESULT(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_ARCHITECTURE,
+ &arch, sizeof(arch)),
+ "ID3D12Device::CheckFeatureSupport"));
+
+ info.isUMA = arch.UMA;
+
+ D3D12_FEATURE_DATA_D3D12_OPTIONS options = {};
+ DAWN_TRY(CheckHRESULT(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
+ &options, sizeof(options)),
+ "ID3D12Device::CheckFeatureSupport"));
+
+ info.resourceHeapTier = options.ResourceHeapTier;
+
+ // Windows builds 1809 and above can use the D3D12 render pass API. If we query
+ // CheckFeatureSupport for D3D12_FEATURE_D3D12_OPTIONS5 successfully, then we can use
+ // the render pass API.
+ D3D12_FEATURE_DATA_D3D12_OPTIONS5 featureOptions5 = {};
+ if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+ D3D12_FEATURE_D3D12_OPTIONS5, &featureOptions5, sizeof(featureOptions5)))) {
+ info.supportsRenderPass = true;
+ } else {
+ info.supportsRenderPass = false;
}
return info;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.h b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.h
index 11be2d31ba9..78d38208169 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.h
@@ -24,6 +24,8 @@ namespace dawn_native { namespace d3d12 {
struct D3D12DeviceInfo {
bool isUMA;
+ uint32_t resourceHeapTier;
+ bool supportsRenderPass;
};
ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter);
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.cpp
index a69641a152b..facc307b267 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/d3d12/DescriptorHeapAllocator.h"
#include "common/Assert.h"
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -25,7 +26,7 @@ namespace dawn_native { namespace d3d12 {
DescriptorHeapHandle::DescriptorHeapHandle(ComPtr<ID3D12DescriptorHeap> descriptorHeap,
uint32_t sizeIncrement,
- uint32_t offset)
+ uint64_t offset)
: mDescriptorHeap(descriptorHeap), mSizeIncrement(sizeIncrement), mOffset(offset) {
}
@@ -68,12 +69,11 @@ namespace dawn_native { namespace d3d12 {
DescriptorHeapInfo* heapInfo,
D3D12_DESCRIPTOR_HEAP_FLAGS flags) {
const Serial pendingSerial = mDevice->GetPendingCommandSerial();
- size_t startOffset = (heapInfo->heap == nullptr)
- ? RingBufferAllocator::kInvalidOffset
- : heapInfo->allocator.Allocate(count, pendingSerial);
+ uint64_t startOffset = (heapInfo->heap == nullptr)
+ ? RingBufferAllocator::kInvalidOffset
+ : heapInfo->allocator.Allocate(count, pendingSerial);
if (startOffset != RingBufferAllocator::kInvalidOffset) {
- return DescriptorHeapHandle{heapInfo->heap, mSizeIncrements[type],
- static_cast<uint32_t>(startOffset)};
+ return DescriptorHeapHandle{heapInfo->heap, mSizeIncrements[type], startOffset};
}
// If the pool has no more space, replace the pool with a new one of the specified size
@@ -84,10 +84,9 @@ namespace dawn_native { namespace d3d12 {
heapDescriptor.Flags = flags;
heapDescriptor.NodeMask = 0;
ComPtr<ID3D12DescriptorHeap> heap;
- if (FAILED(mDevice->GetD3D12Device()->CreateDescriptorHeap(&heapDescriptor,
- IID_PPV_ARGS(&heap)))) {
- return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate heap");
- }
+ DAWN_TRY(CheckHRESULT(
+ mDevice->GetD3D12Device()->CreateDescriptorHeap(&heapDescriptor, IID_PPV_ARGS(&heap)),
+ "ID3D12Device::CreateDescriptorHeap"));
mDevice->ReferenceUntilUnused(heap);
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.h b/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.h
index e4949a68cd7..bcb6ff5f01a 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DescriptorHeapAllocator.h
@@ -33,7 +33,7 @@ namespace dawn_native { namespace d3d12 {
DescriptorHeapHandle();
DescriptorHeapHandle(ComPtr<ID3D12DescriptorHeap> descriptorHeap,
uint32_t sizeIncrement,
- uint32_t offset);
+ uint64_t offset);
ID3D12DescriptorHeap* Get() const;
D3D12_CPU_DESCRIPTOR_HANDLE GetCPUHandle(uint32_t index) const;
@@ -42,7 +42,7 @@ namespace dawn_native { namespace d3d12 {
private:
ComPtr<ID3D12DescriptorHeap> mDescriptorHeap;
uint32_t mSizeIncrement;
- uint32_t mOffset;
+ uint64_t mOffset;
};
class DescriptorHeapAllocator {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
index f1b2e3120fa..0e1ea4790b2 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
@@ -31,7 +31,6 @@
#include "dawn_native/d3d12/PlatformFunctions.h"
#include "dawn_native/d3d12/QueueD3D12.h"
#include "dawn_native/d3d12/RenderPipelineD3D12.h"
-#include "dawn_native/d3d12/ResourceAllocator.h"
#include "dawn_native/d3d12/ResourceAllocatorManagerD3D12.h"
#include "dawn_native/d3d12/SamplerD3D12.h"
#include "dawn_native/d3d12/ShaderModuleD3D12.h"
@@ -43,6 +42,7 @@ namespace dawn_native { namespace d3d12 {
Device::Device(Adapter* adapter, const DeviceDescriptor* descriptor)
: DeviceBase(adapter, descriptor) {
+ InitTogglesFromDriver();
if (descriptor != nullptr) {
ApplyToggleOverrides(descriptor);
}
@@ -72,7 +72,6 @@ namespace dawn_native { namespace d3d12 {
mCommandAllocatorManager = std::make_unique<CommandAllocatorManager>(this);
mDescriptorHeapAllocator = std::make_unique<DescriptorHeapAllocator>(this);
mMapRequestTracker = std::make_unique<MapRequestTracker>(this);
- mResourceAllocator = std::make_unique<ResourceAllocator>(this);
mResourceAllocatorManager = std::make_unique<ResourceAllocatorManager>(this);
DAWN_TRY(NextSerial());
@@ -125,10 +124,6 @@ namespace dawn_native { namespace d3d12 {
// MAX.
mCompletedSerial = std::numeric_limits<Serial>::max();
- // Releasing the uploader enqueues buffers to be released.
- // Call Tick() again to clear them before releasing the allocator.
- mResourceAllocator->Tick(mCompletedSerial);
-
if (mFenceEvent != nullptr) {
::CloseHandle(mFenceEvent);
}
@@ -175,10 +170,6 @@ namespace dawn_native { namespace d3d12 {
return mMapRequestTracker.get();
}
- ResourceAllocator* Device::GetResourceAllocator() const {
- return mResourceAllocator.get();
- }
-
CommandAllocatorManager* Device::GetCommandAllocatorManager() const {
return mCommandAllocatorManager.get();
}
@@ -212,12 +203,12 @@ namespace dawn_native { namespace d3d12 {
// as it enqueued resources to be released.
mDynamicUploader->Deallocate(mCompletedSerial);
- mResourceAllocator->Tick(mCompletedSerial);
+ mResourceAllocatorManager->Tick(mCompletedSerial);
DAWN_TRY(mCommandAllocatorManager->Tick(mCompletedSerial));
mDescriptorHeapAllocator->Deallocate(mCompletedSerial);
mMapRequestTracker->Tick(mCompletedSerial);
mUsedComObjectRefs.ClearUpTo(mCompletedSerial);
- DAWN_TRY(ExecuteCommandContext(nullptr));
+ DAWN_TRY(ExecutePendingCommandContext());
DAWN_TRY(NextSerial());
return {};
}
@@ -242,26 +233,8 @@ namespace dawn_native { namespace d3d12 {
mUsedComObjectRefs.Enqueue(object, GetPendingCommandSerial());
}
- MaybeError Device::ExecuteCommandContext(CommandRecordingContext* commandContext) {
- UINT numLists = 0;
- std::array<ID3D12CommandList*, 2> d3d12CommandLists;
-
- // If there are pending commands, prepend them to ExecuteCommandLists
- if (mPendingCommands.IsOpen()) {
- ID3D12GraphicsCommandList* d3d12CommandList;
- DAWN_TRY_ASSIGN(d3d12CommandList, mPendingCommands.Close());
- d3d12CommandLists[numLists++] = d3d12CommandList;
- }
- if (commandContext != nullptr) {
- ID3D12GraphicsCommandList* d3d12CommandList;
- DAWN_TRY_ASSIGN(d3d12CommandList, commandContext->Close());
- d3d12CommandLists[numLists++] = d3d12CommandList;
- }
- if (numLists > 0) {
- mCommandQueue->ExecuteCommandLists(numLists, d3d12CommandLists.data());
- }
-
- return {};
+ MaybeError Device::ExecutePendingCommandContext() {
+ return mPendingCommands.ExecuteCommandList(mCommandQueue.Get());
}
ResultOrError<BindGroupBase*> Device::CreateBindGroupImpl(
@@ -277,7 +250,7 @@ namespace dawn_native { namespace d3d12 {
DAWN_TRY(buffer->Initialize());
return buffer.release();
}
- CommandBufferBase* Device::CreateCommandBuffer(CommandEncoderBase* encoder,
+ CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) {
return new CommandBuffer(encoder, descriptor);
}
@@ -301,7 +274,7 @@ namespace dawn_native { namespace d3d12 {
}
ResultOrError<ShaderModuleBase*> Device::CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor) {
- return new ShaderModule(this, descriptor);
+ return ShaderModule::Create(this, descriptor);
}
ResultOrError<SwapChainBase*> Device::CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) {
@@ -332,7 +305,7 @@ namespace dawn_native { namespace d3d12 {
DAWN_TRY_ASSIGN(commandRecordingContext, GetPendingCommandContext());
ToBackend(destination)
- ->TransitionUsageNow(commandRecordingContext, dawn::BufferUsage::CopyDst);
+ ->TransitionUsageNow(commandRecordingContext, wgpu::BufferUsage::CopyDst);
commandRecordingContext->GetCommandList()->CopyBufferRegion(
ToBackend(destination)->GetD3D12Resource().Get(), destinationOffset,
@@ -348,33 +321,100 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<ResourceHeapAllocation> Device::AllocateMemory(
D3D12_HEAP_TYPE heapType,
const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage,
- D3D12_HEAP_FLAGS heapFlags) {
- return mResourceAllocatorManager->AllocateMemory(heapType, resourceDescriptor, initialUsage,
- heapFlags);
+ D3D12_RESOURCE_STATES initialUsage) {
+ return mResourceAllocatorManager->AllocateMemory(heapType, resourceDescriptor,
+ initialUsage);
}
TextureBase* Device::WrapSharedHandle(const TextureDescriptor* descriptor,
- HANDLE sharedHandle) {
- if (ConsumedError(ValidateTextureDescriptor(this, descriptor))) {
+ HANDLE sharedHandle,
+ uint64_t acquireMutexKey) {
+ TextureBase* dawnTexture;
+ if (ConsumedError(Texture::Create(this, descriptor, sharedHandle, acquireMutexKey),
+ &dawnTexture))
return nullptr;
- }
- if (ConsumedError(ValidateTextureDescriptorCanBeWrapped(descriptor))) {
- return nullptr;
+ return dawnTexture;
+ }
+
+ // We use IDXGIKeyedMutexes to synchronize access between D3D11 and D3D12. D3D11/12 fences
+ // are a viable alternative but are, unfortunately, not available on all versions of Windows
+ // 10. Since D3D12 does not directly support keyed mutexes, we need to wrap the D3D12
+ // resource using 11on12 and QueryInterface the D3D11 representation for the keyed mutex.
+ ResultOrError<ComPtr<IDXGIKeyedMutex>> Device::CreateKeyedMutexForTexture(
+ ID3D12Resource* d3d12Resource) {
+ if (mD3d11On12Device == nullptr) {
+ ComPtr<ID3D11Device> d3d11Device;
+ ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
+ D3D_FEATURE_LEVEL d3dFeatureLevel;
+ IUnknown* const iUnknownQueue = mCommandQueue.Get();
+ DAWN_TRY(CheckHRESULT(GetFunctions()->d3d11on12CreateDevice(
+ mD3d12Device.Get(), 0, nullptr, 0, &iUnknownQueue, 1, 1,
+ &d3d11Device, &d3d11DeviceContext, &d3dFeatureLevel),
+ "D3D12 11on12 device create"));
+
+ ComPtr<ID3D11On12Device> d3d11on12Device;
+ DAWN_TRY(CheckHRESULT(d3d11Device.As(&d3d11on12Device),
+ "D3D12 QueryInterface ID3D11Device to ID3D11On12Device"));
+
+ ComPtr<ID3D11DeviceContext2> d3d11DeviceContext2;
+ DAWN_TRY(
+ CheckHRESULT(d3d11DeviceContext.As(&d3d11DeviceContext2),
+ "D3D12 QueryInterface ID3D11DeviceContext to ID3D11DeviceContext2"));
+
+ mD3d11On12DeviceContext = std::move(d3d11DeviceContext2);
+ mD3d11On12Device = std::move(d3d11on12Device);
}
- ComPtr<ID3D12Resource> d3d12Resource;
- const HRESULT hr =
- mD3d12Device->OpenSharedHandle(sharedHandle, IID_PPV_ARGS(&d3d12Resource));
+ ComPtr<ID3D11Texture2D> d3d11Texture;
+ D3D11_RESOURCE_FLAGS resourceFlags;
+ resourceFlags.BindFlags = 0;
+ resourceFlags.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
+ resourceFlags.CPUAccessFlags = 0;
+ resourceFlags.StructureByteStride = 0;
+ DAWN_TRY(CheckHRESULT(mD3d11On12Device->CreateWrappedResource(
+ d3d12Resource, &resourceFlags, D3D12_RESOURCE_STATE_COMMON,
+ D3D12_RESOURCE_STATE_COMMON, IID_PPV_ARGS(&d3d11Texture)),
+ "D3D12 creating a wrapped resource"));
+
+ ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
+ DAWN_TRY(CheckHRESULT(d3d11Texture.As(&dxgiKeyedMutex),
+ "D3D12 QueryInterface ID3D11Texture2D to IDXGIKeyedMutex"));
+
+ return dxgiKeyedMutex;
+ }
+
+ void Device::ReleaseKeyedMutexForTexture(ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex) {
+ ComPtr<ID3D11Resource> d3d11Resource;
+ HRESULT hr = dxgiKeyedMutex.As(&d3d11Resource);
if (FAILED(hr)) {
- return nullptr;
+ return;
}
- if (ConsumedError(ValidateD3D12TextureCanBeWrapped(d3d12Resource.Get(), descriptor))) {
- return nullptr;
- }
+ ID3D11Resource* d3d11ResourceRaw = d3d11Resource.Get();
+ mD3d11On12Device->ReleaseWrappedResources(&d3d11ResourceRaw, 1);
+
+ d3d11Resource.Reset();
+ dxgiKeyedMutex.Reset();
- return new Texture(this, descriptor, std::move(d3d12Resource));
+ // 11on12 has a bug where D3D12 resources used only for keyed shared mutexes
+ // are not released until work is submitted to the device context and flushed.
+ // The most minimal work we can get away with is issuing a TiledResourceBarrier.
+
+ // ID3D11DeviceContext2 is available in Win8.1 and above. This suffices for a
+ // D3D12 backend since both D3D12 and 11on12 first appeared in Windows 10.
+ mD3d11On12DeviceContext->TiledResourceBarrier(nullptr, nullptr);
+ mD3d11On12DeviceContext->Flush();
+ }
+
+ const D3D12DeviceInfo& Device::GetDeviceInfo() const {
+ return ToBackend(GetAdapter())->GetDeviceInfo();
}
+
+ void Device::InitTogglesFromDriver() {
+ const bool useResourceHeapTier2 = (GetDeviceInfo().resourceHeapTier >= 2);
+ SetToggle(Toggle::UseD3D12ResourceHeapTier2, useResourceHeapTier2);
+ SetToggle(Toggle::UseD3D12RenderPass, GetDeviceInfo().supportsRenderPass);
+ }
+
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
index 0a00da293b4..2740e039bd2 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
@@ -20,6 +20,7 @@
#include "common/SerialQueue.h"
#include "dawn_native/Device.h"
#include "dawn_native/d3d12/CommandRecordingContext.h"
+#include "dawn_native/d3d12/D3D12Info.h"
#include "dawn_native/d3d12/Forward.h"
#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
@@ -31,7 +32,6 @@ namespace dawn_native { namespace d3d12 {
class DescriptorHeapAllocator;
class MapRequestTracker;
class PlatformFunctions;
- class ResourceAllocator;
class ResourceAllocatorManager;
#define ASSERT_SUCCESS(hr) \
@@ -48,7 +48,7 @@ namespace dawn_native { namespace d3d12 {
MaybeError Initialize();
- CommandBufferBase* CreateCommandBuffer(CommandEncoderBase* encoder,
+ CommandBufferBase* CreateCommandBuffer(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) override;
Serial GetCompletedCommandSerial() const final override;
@@ -64,7 +64,6 @@ namespace dawn_native { namespace d3d12 {
DescriptorHeapAllocator* GetDescriptorHeapAllocator() const;
MapRequestTracker* GetMapRequestTracker() const;
- ResourceAllocator* GetResourceAllocator() const;
CommandAllocatorManager* GetCommandAllocatorManager() const;
const PlatformFunctions* GetFunctions() const;
@@ -73,12 +72,14 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<CommandRecordingContext*> GetPendingCommandContext();
Serial GetPendingCommandSerial() const override;
+ const D3D12DeviceInfo& GetDeviceInfo() const;
+
MaybeError NextSerial();
MaybeError WaitForSerial(Serial serial);
void ReferenceUntilUnused(ComPtr<IUnknown> object);
- MaybeError ExecuteCommandContext(CommandRecordingContext* commandContext);
+ MaybeError ExecutePendingCommandContext();
ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
@@ -90,12 +91,18 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<ResourceHeapAllocation> AllocateMemory(
D3D12_HEAP_TYPE heapType,
const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage,
- D3D12_HEAP_FLAGS heapFlags);
+ D3D12_RESOURCE_STATES initialUsage);
void DeallocateMemory(ResourceHeapAllocation& allocation);
- TextureBase* WrapSharedHandle(const TextureDescriptor* descriptor, HANDLE sharedHandle);
+ TextureBase* WrapSharedHandle(const TextureDescriptor* descriptor,
+ HANDLE sharedHandle,
+ uint64_t acquireMutexKey);
+ ResultOrError<ComPtr<IDXGIKeyedMutex>> CreateKeyedMutexForTexture(
+ ID3D12Resource* d3d12Resource);
+ void ReleaseKeyedMutexForTexture(ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex);
+
+ void InitTogglesFromDriver();
private:
ResultOrError<BindGroupBase*> CreateBindGroupImpl(
@@ -128,6 +135,10 @@ namespace dawn_native { namespace d3d12 {
ComPtr<ID3D12Device> mD3d12Device; // Device is owned by adapter and will not be outlived.
ComPtr<ID3D12CommandQueue> mCommandQueue;
+ // 11on12 device and device context corresponding to mCommandQueue
+ ComPtr<ID3D11On12Device> mD3d11On12Device;
+ ComPtr<ID3D11DeviceContext2> mD3d11On12DeviceContext;
+
ComPtr<ID3D12CommandSignature> mDispatchIndirectSignature;
ComPtr<ID3D12CommandSignature> mDrawIndirectSignature;
ComPtr<ID3D12CommandSignature> mDrawIndexedIndirectSignature;
@@ -139,7 +150,6 @@ namespace dawn_native { namespace d3d12 {
std::unique_ptr<CommandAllocatorManager> mCommandAllocatorManager;
std::unique_ptr<DescriptorHeapAllocator> mDescriptorHeapAllocator;
std::unique_ptr<MapRequestTracker> mMapRequestTracker;
- std::unique_ptr<ResourceAllocator> mResourceAllocator;
std::unique_ptr<ResourceAllocatorManager> mResourceAllocatorManager;
dawn_native::PCIInfo mPCIInfo;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.cpp
new file mode 100644
index 00000000000..e16f380110b
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.cpp
@@ -0,0 +1,56 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/d3d12/HeapAllocatorD3D12.h"
+#include "dawn_native/d3d12/D3D12Error.h"
+#include "dawn_native/d3d12/DeviceD3D12.h"
+#include "dawn_native/d3d12/HeapD3D12.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ HeapAllocator::HeapAllocator(Device* device,
+ D3D12_HEAP_TYPE heapType,
+ D3D12_HEAP_FLAGS heapFlags)
+ : mDevice(device), mHeapType(heapType), mHeapFlags(heapFlags) {
+ }
+
+ ResultOrError<std::unique_ptr<ResourceHeapBase>> HeapAllocator::AllocateResourceHeap(
+ uint64_t size) {
+ D3D12_HEAP_DESC heapDesc;
+ heapDesc.SizeInBytes = size;
+ heapDesc.Properties.Type = mHeapType;
+ heapDesc.Properties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
+ heapDesc.Properties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
+ heapDesc.Properties.CreationNodeMask = 0;
+ heapDesc.Properties.VisibleNodeMask = 0;
+ // It is preferred to use a size that is a multiple of the alignment.
+ // However, MSAA heaps are always aligned to 4MB instead of 64KB. This means
+ // if the heap size is too small, the VMM would fragment.
+ // TODO(bryan.bernhart@intel.com): Consider having MSAA vs non-MSAA heaps.
+ heapDesc.Alignment = D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT;
+ heapDesc.Flags = mHeapFlags;
+
+ ComPtr<ID3D12Heap> heap;
+ DAWN_TRY(CheckOutOfMemoryHRESULT(
+ mDevice->GetD3D12Device()->CreateHeap(&heapDesc, IID_PPV_ARGS(&heap)),
+ "ID3D12Device::CreateHeap"));
+
+ return {std::make_unique<Heap>(std::move(heap))};
+ }
+
+ void HeapAllocator::DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> heap) {
+ mDevice->ReferenceUntilUnused(static_cast<Heap*>(heap.get())->GetD3D12Heap());
+ }
+
+}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocator.h b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.h
index 9311db6df0b..34b435d7381 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocator.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapAllocatorD3D12.h
@@ -1,4 +1,4 @@
-// Copyright 2017 The Dawn Authors
+// Copyright 2019 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,33 +12,32 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#ifndef DAWNNATIVE_D3D12_RESOURCEALLOCATIONMANAGER_H_
-#define DAWNNATIVE_D3D12_RESOURCEALLOCATIONMANAGER_H_
+#ifndef DAWNNATIVE_D3D12_HEAPALLOCATORD3D12_H_
+#define DAWNNATIVE_D3D12_HEAPALLOCATORD3D12_H_
+#include "dawn_native/ResourceHeapAllocator.h"
#include "dawn_native/d3d12/d3d12_platform.h"
-#include "common/SerialQueue.h"
-
namespace dawn_native { namespace d3d12 {
class Device;
- class ResourceAllocator {
+ // Wrapper to allocate a D3D12 heap.
+ class HeapAllocator : public ResourceHeapAllocator {
public:
- ResourceAllocator(Device* device);
+ HeapAllocator(Device* device, D3D12_HEAP_TYPE heapType, D3D12_HEAP_FLAGS heapFlags);
+ ~HeapAllocator() override = default;
- ComPtr<ID3D12Resource> Allocate(D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage);
- void Release(ComPtr<ID3D12Resource> resource);
- void Tick(uint64_t lastCompletedSerial);
+ ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
+ uint64_t size) override;
+ void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
private:
Device* mDevice;
-
- SerialQueue<ComPtr<ID3D12Resource>> mReleasedResources;
+ D3D12_HEAP_TYPE mHeapType;
+ D3D12_HEAP_FLAGS mHeapFlags;
};
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_RESOURCEALLOCATIONMANAGER_H_
+#endif // DAWNNATIVE_D3D12_HEAPALLOCATORD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp
new file mode 100644
index 00000000000..2e35bdf7cf7
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp
@@ -0,0 +1,25 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/d3d12/HeapD3D12.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ Heap::Heap(ComPtr<ID3D12Heap> heap) : mHeap(std::move(heap)) {
+ }
+
+ ComPtr<ID3D12Heap> Heap::GetD3D12Heap() const {
+ return mHeap;
+ }
+}} // namespace dawn_native::d3d12 \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h
new file mode 100644
index 00000000000..834e42ac9fb
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h
@@ -0,0 +1,35 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_HEAPD3D12_H_
+#define DAWNNATIVE_D3D12_HEAPD3D12_H_
+
+#include "dawn_native/ResourceHeap.h"
+#include "dawn_native/d3d12/d3d12_platform.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ class Heap : public ResourceHeapBase {
+ public:
+ Heap(ComPtr<ID3D12Heap> heap);
+ ~Heap() = default;
+
+ ComPtr<ID3D12Heap> GetD3D12Heap() const;
+
+ private:
+ ComPtr<ID3D12Heap> mHeap;
+ };
+}} // namespace dawn_native::d3d12
+
+#endif // DAWNNATIVE_D3D12_HEAPD3D12_H_ \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp
index 1210bb8d606..9170e8ed864 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.cpp
@@ -21,15 +21,15 @@
namespace dawn_native { namespace d3d12 {
namespace {
- DXGI_USAGE D3D12SwapChainBufferUsage(DawnTextureUsage allowedUsages) {
+ DXGI_USAGE D3D12SwapChainBufferUsage(WGPUTextureUsage allowedUsages) {
DXGI_USAGE usage = DXGI_CPU_ACCESS_NONE;
- if (allowedUsages & DAWN_TEXTURE_USAGE_SAMPLED) {
+ if (allowedUsages & WGPUTextureUsage_Sampled) {
usage |= DXGI_USAGE_SHADER_INPUT;
}
- if (allowedUsages & DAWN_TEXTURE_USAGE_STORAGE) {
+ if (allowedUsages & WGPUTextureUsage_Storage) {
usage |= DXGI_USAGE_UNORDERED_ACCESS;
}
- if (allowedUsages & DAWN_TEXTURE_USAGE_OUTPUT_ATTACHMENT) {
+ if (allowedUsages & WGPUTextureUsage_OutputAttachment) {
usage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
}
return usage;
@@ -39,7 +39,7 @@ namespace dawn_native { namespace d3d12 {
} // anonymous namespace
NativeSwapChainImpl::NativeSwapChainImpl(Device* device, HWND window)
- : mWindow(window), mDevice(device) {
+ : mWindow(window), mDevice(device), mInterval(1) {
}
NativeSwapChainImpl::~NativeSwapChainImpl() {
@@ -48,17 +48,19 @@ namespace dawn_native { namespace d3d12 {
void NativeSwapChainImpl::Init(DawnWSIContextD3D12* /*context*/) {
}
- DawnSwapChainError NativeSwapChainImpl::Configure(DawnTextureFormat format,
- DawnTextureUsage usage,
+ DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+ WGPUTextureUsage usage,
uint32_t width,
uint32_t height) {
ASSERT(width > 0);
ASSERT(height > 0);
- ASSERT(format == static_cast<DawnTextureFormat>(GetPreferredFormat()));
+ ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
ComPtr<IDXGIFactory4> factory = mDevice->GetFactory();
ComPtr<ID3D12CommandQueue> queue = mDevice->GetCommandQueue();
+ mInterval = mDevice->IsToggleEnabled(Toggle::TurnOffVsync) == true ? 0 : 1;
+
// Create the D3D12 swapchain, assuming only two buffers for now
DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
swapChainDesc.Width = width;
@@ -103,7 +105,7 @@ namespace dawn_native { namespace d3d12 {
DawnSwapChainError NativeSwapChainImpl::Present() {
// This assumes the texture has already been transition to the PRESENT state.
- ASSERT_SUCCESS(mSwapChain->Present(1, 0));
+ ASSERT_SUCCESS(mSwapChain->Present(mInterval, 0));
// TODO(cwallez@chromium.org): Make the serial ticking implicit.
ASSERT(mDevice->NextSerial().IsSuccess());
@@ -111,8 +113,8 @@ namespace dawn_native { namespace d3d12 {
return DAWN_SWAP_CHAIN_NO_ERROR;
}
- dawn::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
- return dawn::TextureFormat::RGBA8Unorm;
+ wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+ return wgpu::TextureFormat::RGBA8Unorm;
}
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.h
index 223f5ef6b90..aaa8d85ee59 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/NativeSwapChainImplD3D12.h
@@ -34,18 +34,19 @@ namespace dawn_native { namespace d3d12 {
~NativeSwapChainImpl();
void Init(DawnWSIContextD3D12* context);
- DawnSwapChainError Configure(DawnTextureFormat format,
- DawnTextureUsage,
+ DawnSwapChainError Configure(WGPUTextureFormat format,
+ WGPUTextureUsage,
uint32_t width,
uint32_t height);
DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
DawnSwapChainError Present();
- dawn::TextureFormat GetPreferredFormat() const;
+ wgpu::TextureFormat GetPreferredFormat() const;
private:
HWND mWindow = nullptr;
Device* mDevice = nullptr;
+ UINT mInterval;
ComPtr<IDXGISwapChain3> mSwapChain = nullptr;
std::vector<ComPtr<ID3D12Resource>> mBuffers;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
index 6c38e68c5d6..2cdd6cf1db7 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
@@ -25,14 +25,14 @@ using Microsoft::WRL::ComPtr;
namespace dawn_native { namespace d3d12 {
namespace {
- D3D12_SHADER_VISIBILITY ShaderVisibilityType(dawn::ShaderStage visibility) {
- ASSERT(visibility != dawn::ShaderStage::None);
+ D3D12_SHADER_VISIBILITY ShaderVisibilityType(wgpu::ShaderStage visibility) {
+ ASSERT(visibility != wgpu::ShaderStage::None);
- if (visibility == dawn::ShaderStage::Vertex) {
+ if (visibility == wgpu::ShaderStage::Vertex) {
return D3D12_SHADER_VISIBILITY_VERTEX;
}
- if (visibility == dawn::ShaderStage::Fragment) {
+ if (visibility == wgpu::ShaderStage::Fragment) {
return D3D12_SHADER_VISIBILITY_PIXEL;
}
@@ -40,16 +40,16 @@ namespace dawn_native { namespace d3d12 {
return D3D12_SHADER_VISIBILITY_ALL;
}
- D3D12_ROOT_PARAMETER_TYPE RootParameterType(dawn::BindingType type) {
+ D3D12_ROOT_PARAMETER_TYPE RootParameterType(wgpu::BindingType type) {
switch (type) {
- case dawn::BindingType::UniformBuffer:
+ case wgpu::BindingType::UniformBuffer:
return D3D12_ROOT_PARAMETER_TYPE_CBV;
- case dawn::BindingType::StorageBuffer:
+ case wgpu::BindingType::StorageBuffer:
return D3D12_ROOT_PARAMETER_TYPE_UAV;
- case dawn::BindingType::SampledTexture:
- case dawn::BindingType::Sampler:
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::SampledTexture:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
UNREACHABLE();
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp
index 747cb68dbfa..ae25dc1934a 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.cpp
@@ -27,6 +27,7 @@ namespace dawn_native { namespace d3d12 {
DAWN_TRY(LoadD3D12());
DAWN_TRY(LoadDXGI());
DAWN_TRY(LoadD3DCompiler());
+ DAWN_TRY(LoadD3D11());
LoadPIXRuntime();
return {};
}
@@ -50,6 +51,16 @@ namespace dawn_native { namespace d3d12 {
return {};
}
+ MaybeError PlatformFunctions::LoadD3D11() {
+ std::string error;
+ if (!mD3D11Lib.Open("d3d11.dll", &error) ||
+ !mD3D11Lib.GetProc(&d3d11on12CreateDevice, "D3D11On12CreateDevice", &error)) {
+ return DAWN_DEVICE_LOST_ERROR(error.c_str());
+ }
+
+ return {};
+ }
+
MaybeError PlatformFunctions::LoadDXGI() {
std::string error;
if (!mDXGILib.Open("dxgi.dll", &error) ||
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.h b/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.h
index f367bfce81d..a9e85d274b7 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PlatformFunctions.h
@@ -77,13 +77,18 @@ namespace dawn_native { namespace d3d12 {
PFN_SET_MARKER_ON_COMMAND_LIST pixSetMarkerOnCommandList = nullptr;
+ // Functions from D3D11.dll
+ PFN_D3D11ON12_CREATE_DEVICE d3d11on12CreateDevice = nullptr;
+
private:
MaybeError LoadD3D12();
+ MaybeError LoadD3D11();
MaybeError LoadDXGI();
MaybeError LoadD3DCompiler();
void LoadPIXRuntime();
DynamicLib mD3D12Lib;
+ DynamicLib mD3D11Lib;
DynamicLib mDXGILib;
DynamicLib mD3DCompilerLib;
DynamicLib mPIXEventRuntimeLib;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
index 8c50bd78b9a..c563c5028bd 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
@@ -17,6 +17,8 @@
#include "dawn_native/d3d12/CommandBufferD3D12.h"
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
+#include "dawn_platform/DawnPlatform.h"
+#include "dawn_platform/tracing/TraceEvent.h"
namespace dawn_native { namespace d3d12 {
@@ -28,13 +30,18 @@ namespace dawn_native { namespace d3d12 {
device->Tick();
- DAWN_TRY(mCommandContext.Open(device->GetD3D12Device().Get(),
- device->GetCommandAllocatorManager()));
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+
+ TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording,
+ "CommandBufferD3D12::RecordCommands");
for (uint32_t i = 0; i < commandCount; ++i) {
- DAWN_TRY(ToBackend(commands[i])->RecordCommands(&mCommandContext, i));
+ DAWN_TRY(ToBackend(commands[i])->RecordCommands(commandContext, i));
}
+ TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording,
+ "CommandBufferD3D12::RecordCommands");
- DAWN_TRY(device->ExecuteCommandContext(&mCommandContext));
+ DAWN_TRY(device->ExecutePendingCommandContext());
DAWN_TRY(device->NextSerial());
return {};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h
index 121d19c6b75..0e3c795d9b0 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h
@@ -31,8 +31,6 @@ namespace dawn_native { namespace d3d12 {
private:
MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
-
- CommandRecordingContext mCommandContext;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp
new file mode 100644
index 00000000000..c6846a06cac
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp
@@ -0,0 +1,232 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/d3d12/RenderPassBuilderD3D12.h"
+
+#include "dawn_native/Format.h"
+#include "dawn_native/d3d12/CommandBufferD3D12.h"
+#include "dawn_native/d3d12/Forward.h"
+#include "dawn_native/d3d12/TextureD3D12.h"
+
+#include "dawn_native/dawn_platform.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ namespace {
+ D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE D3D12BeginningAccessType(wgpu::LoadOp loadOp) {
+ switch (loadOp) {
+ case wgpu::LoadOp::Clear:
+ return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR;
+ case wgpu::LoadOp::Load:
+ return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_PRESERVE;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ D3D12_RENDER_PASS_ENDING_ACCESS_TYPE D3D12EndingAccessType(wgpu::StoreOp storeOp) {
+ switch (storeOp) {
+ case wgpu::StoreOp::Clear:
+ return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_DISCARD;
+ case wgpu::StoreOp::Store:
+ return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_PRESERVE;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS D3D12EndingAccessResolveParameters(
+ wgpu::StoreOp storeOp,
+ TextureView* resolveSource,
+ TextureView* resolveDestination) {
+ D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS resolveParameters;
+
+ resolveParameters.Format = resolveDestination->GetD3D12Format();
+ resolveParameters.pSrcResource =
+ ToBackend(resolveSource->GetTexture())->GetD3D12Resource();
+ resolveParameters.pDstResource =
+ ToBackend(resolveDestination->GetTexture())->GetD3D12Resource();
+
+ // Clear or preserve the resolve source.
+ if (storeOp == wgpu::StoreOp::Clear) {
+ resolveParameters.PreserveResolveSource = false;
+ } else if (storeOp == wgpu::StoreOp::Store) {
+ resolveParameters.PreserveResolveSource = true;
+ }
+
+ // RESOLVE_MODE_AVERAGE is only valid for non-integer formats.
+ // TODO: Investigate and determine how integer format resolves should work in WebGPU.
+ switch (resolveDestination->GetFormat().type) {
+ case Format::Type::Sint:
+ case Format::Type::Uint:
+ resolveParameters.ResolveMode = D3D12_RESOLVE_MODE_MAX;
+ break;
+ default:
+ resolveParameters.ResolveMode = D3D12_RESOLVE_MODE_AVERAGE;
+ break;
+ }
+
+ resolveParameters.SubresourceCount = 1;
+
+ return resolveParameters;
+ }
+
+ D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS
+ D3D12EndingAccessResolveSubresourceParameters(TextureView* resolveDestination) {
+ D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS subresourceParameters;
+ Texture* resolveDestinationTexture = ToBackend(resolveDestination->GetTexture());
+
+ subresourceParameters.DstX = 0;
+ subresourceParameters.DstY = 0;
+ subresourceParameters.SrcSubresource = 0;
+ subresourceParameters.DstSubresource = resolveDestinationTexture->GetSubresourceIndex(
+ resolveDestination->GetBaseMipLevel(), resolveDestination->GetBaseArrayLayer());
+ // Resolving a specified sub-rect is only valid on hardware that supports sample
+ // positions. This means even {0, 0, width, height} would be invalid if unsupported. To
+ // avoid this, we assume sub-rect resolves never work by setting them to all zeros or
+ // "empty" to resolve the entire region.
+ subresourceParameters.SrcRect = {0, 0, 0, 0};
+
+ return subresourceParameters;
+ }
+ } // anonymous namespace
+
+ RenderPassBuilder::RenderPassBuilder(const OMSetRenderTargetArgs& args, bool hasUAV)
+ : mColorAttachmentCount(args.numRTVs), mRenderTargetViews(args.RTVs.data()) {
+ for (uint32_t i = 0; i < mColorAttachmentCount; i++) {
+ mRenderPassRenderTargetDescriptors[i].cpuDescriptor = args.RTVs[i];
+ }
+
+ mRenderPassDepthStencilDesc.cpuDescriptor = args.dsv;
+
+ if (hasUAV) {
+ mRenderPassFlags = D3D12_RENDER_PASS_FLAG_ALLOW_UAV_WRITES;
+ }
+ }
+
+ uint32_t RenderPassBuilder::GetColorAttachmentCount() const {
+ return mColorAttachmentCount;
+ }
+
+ bool RenderPassBuilder::HasDepth() const {
+ return mHasDepth;
+ }
+
+ const D3D12_RENDER_PASS_RENDER_TARGET_DESC*
+ RenderPassBuilder::GetRenderPassRenderTargetDescriptors() const {
+ return mRenderPassRenderTargetDescriptors.data();
+ }
+
+ const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC*
+ RenderPassBuilder::GetRenderPassDepthStencilDescriptor() const {
+ return &mRenderPassDepthStencilDesc;
+ }
+
+ D3D12_RENDER_PASS_FLAGS RenderPassBuilder::GetRenderPassFlags() const {
+ return mRenderPassFlags;
+ }
+
+ const D3D12_CPU_DESCRIPTOR_HANDLE* RenderPassBuilder::GetRenderTargetViews() const {
+ return mRenderTargetViews;
+ }
+
+ void RenderPassBuilder::SetRenderTargetBeginningAccess(uint32_t attachment,
+ wgpu::LoadOp loadOp,
+ dawn_native::Color clearColor,
+ DXGI_FORMAT format) {
+ mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Type =
+ D3D12BeginningAccessType(loadOp);
+ if (loadOp == wgpu::LoadOp::Clear) {
+ mRenderPassRenderTargetDescriptors[attachment]
+ .BeginningAccess.Clear.ClearValue.Color[0] = clearColor.r;
+ mRenderPassRenderTargetDescriptors[attachment]
+ .BeginningAccess.Clear.ClearValue.Color[1] = clearColor.g;
+ mRenderPassRenderTargetDescriptors[attachment]
+ .BeginningAccess.Clear.ClearValue.Color[2] = clearColor.b;
+ mRenderPassRenderTargetDescriptors[attachment]
+ .BeginningAccess.Clear.ClearValue.Color[3] = clearColor.a;
+ mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Format =
+ format;
+ }
+ }
+
+ void RenderPassBuilder::SetRenderTargetEndingAccess(uint32_t attachment,
+ wgpu::StoreOp storeOp) {
+ mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
+ D3D12EndingAccessType(storeOp);
+ }
+
+ void RenderPassBuilder::SetRenderTargetEndingAccessResolve(uint32_t attachment,
+ wgpu::StoreOp storeOp,
+ TextureView* resolveSource,
+ TextureView* resolveDestination) {
+ mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
+ D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_RESOLVE;
+ mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve =
+ D3D12EndingAccessResolveParameters(storeOp, resolveSource, resolveDestination);
+
+ mSubresourceParams[attachment] =
+ D3D12EndingAccessResolveSubresourceParameters(resolveDestination);
+
+ mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve.pSubresourceParameters =
+ &mSubresourceParams[attachment];
+ }
+
+ void RenderPassBuilder::SetDepthAccess(wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ float clearDepth,
+ DXGI_FORMAT format) {
+ mHasDepth = true;
+ mRenderPassDepthStencilDesc.DepthBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
+ if (loadOp == wgpu::LoadOp::Clear) {
+ mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth =
+ clearDepth;
+ mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.Format = format;
+ }
+ mRenderPassDepthStencilDesc.DepthEndingAccess.Type = D3D12EndingAccessType(storeOp);
+ }
+
+ void RenderPassBuilder::SetStencilAccess(wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ uint8_t clearStencil,
+ DXGI_FORMAT format) {
+ mRenderPassDepthStencilDesc.StencilBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
+ if (loadOp == wgpu::LoadOp::Clear) {
+ mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.DepthStencil
+ .Stencil = clearStencil;
+ mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.Format = format;
+ }
+ mRenderPassDepthStencilDesc.StencilEndingAccess.Type = D3D12EndingAccessType(storeOp);
+ }
+
+ void RenderPassBuilder::SetDepthNoAccess() {
+ mRenderPassDepthStencilDesc.DepthBeginningAccess.Type =
+ D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
+ mRenderPassDepthStencilDesc.DepthEndingAccess.Type =
+ D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
+ }
+
+ void RenderPassBuilder::SetDepthStencilNoAccess() {
+ SetDepthNoAccess();
+ SetStencilNoAccess();
+ }
+
+ void RenderPassBuilder::SetStencilNoAccess() {
+ mRenderPassDepthStencilDesc.StencilBeginningAccess.Type =
+ D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
+ mRenderPassDepthStencilDesc.StencilEndingAccess.Type =
+ D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
+ }
+
+}} // namespace dawn_native::d3d12 \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.h
new file mode 100644
index 00000000000..1ecd87e3ce8
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.h
@@ -0,0 +1,89 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
+#define DAWNNATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
+
+#include "common/Constants.h"
+#include "dawn_native/d3d12/d3d12_platform.h"
+#include "dawn_native/dawn_platform.h"
+
+#include <array>
+
+namespace dawn_native { namespace d3d12 {
+
+ class TextureView;
+
+ struct OMSetRenderTargetArgs;
+
+ // RenderPassBuilder stores parameters related to render pass load and store operations.
+ // When the D3D12 render pass API is available, the needed descriptors can be fetched
+ // directly from the RenderPassBuilder. When the D3D12 render pass API is not available, the
+ // descriptors are still fetched and any information necessary to emulate the load and store
+ // operations is extracted from the descriptors.
+ class RenderPassBuilder {
+ public:
+ RenderPassBuilder(const OMSetRenderTargetArgs& args, bool hasUAV);
+
+ uint32_t GetColorAttachmentCount() const;
+
+ // Returns descriptors that are fed directly to BeginRenderPass, or are used as parameter
+ // storage if D3D12 render pass API is unavailable.
+ const D3D12_RENDER_PASS_RENDER_TARGET_DESC* GetRenderPassRenderTargetDescriptors() const;
+ const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC* GetRenderPassDepthStencilDescriptor() const;
+
+ D3D12_RENDER_PASS_FLAGS GetRenderPassFlags() const;
+
+ // Returns attachment RTVs to use with OMSetRenderTargets.
+ const D3D12_CPU_DESCRIPTOR_HANDLE* GetRenderTargetViews() const;
+
+ bool HasDepth() const;
+
+ // Functions that set the appropriate values in the render pass descriptors.
+ void SetDepthAccess(wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ float clearDepth,
+ DXGI_FORMAT format);
+ void SetDepthNoAccess();
+ void SetDepthStencilNoAccess();
+ void SetRenderTargetBeginningAccess(uint32_t attachment,
+ wgpu::LoadOp loadOp,
+ dawn_native::Color clearColor,
+ DXGI_FORMAT format);
+ void SetRenderTargetEndingAccess(uint32_t attachment, wgpu::StoreOp storeOp);
+ void SetRenderTargetEndingAccessResolve(uint32_t attachment,
+ wgpu::StoreOp storeOp,
+ TextureView* resolveSource,
+ TextureView* resolveDestination);
+ void SetStencilAccess(wgpu::LoadOp loadOp,
+ wgpu::StoreOp storeOp,
+ uint8_t clearStencil,
+ DXGI_FORMAT format);
+ void SetStencilNoAccess();
+
+ private:
+ uint32_t mColorAttachmentCount = 0;
+ bool mHasDepth = false;
+ D3D12_RENDER_PASS_FLAGS mRenderPassFlags = D3D12_RENDER_PASS_FLAG_NONE;
+ D3D12_RENDER_PASS_DEPTH_STENCIL_DESC mRenderPassDepthStencilDesc;
+ std::array<D3D12_RENDER_PASS_RENDER_TARGET_DESC, kMaxColorAttachments>
+ mRenderPassRenderTargetDescriptors;
+ const D3D12_CPU_DESCRIPTOR_HANDLE* mRenderTargetViews;
+ std::array<D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS,
+ kMaxColorAttachments>
+ mSubresourceParams;
+ };
+}} // namespace dawn_native::d3d12
+
+#endif // DAWNNATIVE_D3D12_RENDERPASSBUILDERD3D12_H_ \ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
index 82fd037d056..89c9ed514ce 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
@@ -28,95 +28,95 @@
namespace dawn_native { namespace d3d12 {
namespace {
- DXGI_FORMAT VertexFormatType(dawn::VertexFormat format) {
+ DXGI_FORMAT VertexFormatType(wgpu::VertexFormat format) {
switch (format) {
- case dawn::VertexFormat::UChar2:
+ case wgpu::VertexFormat::UChar2:
return DXGI_FORMAT_R8G8_UINT;
- case dawn::VertexFormat::UChar4:
+ case wgpu::VertexFormat::UChar4:
return DXGI_FORMAT_R8G8B8A8_UINT;
- case dawn::VertexFormat::Char2:
+ case wgpu::VertexFormat::Char2:
return DXGI_FORMAT_R8G8_SINT;
- case dawn::VertexFormat::Char4:
+ case wgpu::VertexFormat::Char4:
return DXGI_FORMAT_R8G8B8A8_SINT;
- case dawn::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::UChar2Norm:
return DXGI_FORMAT_R8G8_UNORM;
- case dawn::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::UChar4Norm:
return DXGI_FORMAT_R8G8B8A8_UNORM;
- case dawn::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::Char2Norm:
return DXGI_FORMAT_R8G8_SNORM;
- case dawn::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::Char4Norm:
return DXGI_FORMAT_R8G8B8A8_SNORM;
- case dawn::VertexFormat::UShort2:
+ case wgpu::VertexFormat::UShort2:
return DXGI_FORMAT_R16G16_UINT;
- case dawn::VertexFormat::UShort4:
+ case wgpu::VertexFormat::UShort4:
return DXGI_FORMAT_R16G16B16A16_UINT;
- case dawn::VertexFormat::Short2:
+ case wgpu::VertexFormat::Short2:
return DXGI_FORMAT_R16G16_SINT;
- case dawn::VertexFormat::Short4:
+ case wgpu::VertexFormat::Short4:
return DXGI_FORMAT_R16G16B16A16_SINT;
- case dawn::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::UShort2Norm:
return DXGI_FORMAT_R16G16_UNORM;
- case dawn::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::UShort4Norm:
return DXGI_FORMAT_R16G16B16A16_UNORM;
- case dawn::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Short2Norm:
return DXGI_FORMAT_R16G16_SNORM;
- case dawn::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Short4Norm:
return DXGI_FORMAT_R16G16B16A16_SNORM;
- case dawn::VertexFormat::Half2:
+ case wgpu::VertexFormat::Half2:
return DXGI_FORMAT_R16G16_FLOAT;
- case dawn::VertexFormat::Half4:
+ case wgpu::VertexFormat::Half4:
return DXGI_FORMAT_R16G16B16A16_FLOAT;
- case dawn::VertexFormat::Float:
+ case wgpu::VertexFormat::Float:
return DXGI_FORMAT_R32_FLOAT;
- case dawn::VertexFormat::Float2:
+ case wgpu::VertexFormat::Float2:
return DXGI_FORMAT_R32G32_FLOAT;
- case dawn::VertexFormat::Float3:
+ case wgpu::VertexFormat::Float3:
return DXGI_FORMAT_R32G32B32_FLOAT;
- case dawn::VertexFormat::Float4:
+ case wgpu::VertexFormat::Float4:
return DXGI_FORMAT_R32G32B32A32_FLOAT;
- case dawn::VertexFormat::UInt:
+ case wgpu::VertexFormat::UInt:
return DXGI_FORMAT_R32_UINT;
- case dawn::VertexFormat::UInt2:
+ case wgpu::VertexFormat::UInt2:
return DXGI_FORMAT_R32G32_UINT;
- case dawn::VertexFormat::UInt3:
+ case wgpu::VertexFormat::UInt3:
return DXGI_FORMAT_R32G32B32_UINT;
- case dawn::VertexFormat::UInt4:
+ case wgpu::VertexFormat::UInt4:
return DXGI_FORMAT_R32G32B32A32_UINT;
- case dawn::VertexFormat::Int:
+ case wgpu::VertexFormat::Int:
return DXGI_FORMAT_R32_SINT;
- case dawn::VertexFormat::Int2:
+ case wgpu::VertexFormat::Int2:
return DXGI_FORMAT_R32G32_SINT;
- case dawn::VertexFormat::Int3:
+ case wgpu::VertexFormat::Int3:
return DXGI_FORMAT_R32G32B32_SINT;
- case dawn::VertexFormat::Int4:
+ case wgpu::VertexFormat::Int4:
return DXGI_FORMAT_R32G32B32A32_SINT;
default:
UNREACHABLE();
}
}
- D3D12_INPUT_CLASSIFICATION InputStepModeFunction(dawn::InputStepMode mode) {
+ D3D12_INPUT_CLASSIFICATION InputStepModeFunction(wgpu::InputStepMode mode) {
switch (mode) {
- case dawn::InputStepMode::Vertex:
+ case wgpu::InputStepMode::Vertex:
return D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA;
- case dawn::InputStepMode::Instance:
+ case wgpu::InputStepMode::Instance:
return D3D12_INPUT_CLASSIFICATION_PER_INSTANCE_DATA;
default:
UNREACHABLE();
}
}
- D3D12_PRIMITIVE_TOPOLOGY D3D12PrimitiveTopology(dawn::PrimitiveTopology primitiveTopology) {
+ D3D12_PRIMITIVE_TOPOLOGY D3D12PrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
switch (primitiveTopology) {
- case dawn::PrimitiveTopology::PointList:
+ case wgpu::PrimitiveTopology::PointList:
return D3D_PRIMITIVE_TOPOLOGY_POINTLIST;
- case dawn::PrimitiveTopology::LineList:
+ case wgpu::PrimitiveTopology::LineList:
return D3D_PRIMITIVE_TOPOLOGY_LINELIST;
- case dawn::PrimitiveTopology::LineStrip:
+ case wgpu::PrimitiveTopology::LineStrip:
return D3D_PRIMITIVE_TOPOLOGY_LINESTRIP;
- case dawn::PrimitiveTopology::TriangleList:
+ case wgpu::PrimitiveTopology::TriangleList:
return D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST;
- case dawn::PrimitiveTopology::TriangleStrip:
+ case wgpu::PrimitiveTopology::TriangleStrip:
return D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP;
default:
UNREACHABLE();
@@ -124,95 +124,95 @@ namespace dawn_native { namespace d3d12 {
}
D3D12_PRIMITIVE_TOPOLOGY_TYPE D3D12PrimitiveTopologyType(
- dawn::PrimitiveTopology primitiveTopology) {
+ wgpu::PrimitiveTopology primitiveTopology) {
switch (primitiveTopology) {
- case dawn::PrimitiveTopology::PointList:
+ case wgpu::PrimitiveTopology::PointList:
return D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT;
- case dawn::PrimitiveTopology::LineList:
- case dawn::PrimitiveTopology::LineStrip:
+ case wgpu::PrimitiveTopology::LineList:
+ case wgpu::PrimitiveTopology::LineStrip:
return D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE;
- case dawn::PrimitiveTopology::TriangleList:
- case dawn::PrimitiveTopology::TriangleStrip:
+ case wgpu::PrimitiveTopology::TriangleList:
+ case wgpu::PrimitiveTopology::TriangleStrip:
return D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
default:
UNREACHABLE();
}
}
- D3D12_CULL_MODE D3D12CullMode(dawn::CullMode mode) {
+ D3D12_CULL_MODE D3D12CullMode(wgpu::CullMode mode) {
switch (mode) {
- case dawn::CullMode::None:
+ case wgpu::CullMode::None:
return D3D12_CULL_MODE_NONE;
- case dawn::CullMode::Front:
+ case wgpu::CullMode::Front:
return D3D12_CULL_MODE_FRONT;
- case dawn::CullMode::Back:
+ case wgpu::CullMode::Back:
return D3D12_CULL_MODE_BACK;
default:
UNREACHABLE();
}
}
- D3D12_BLEND D3D12Blend(dawn::BlendFactor factor) {
+ D3D12_BLEND D3D12Blend(wgpu::BlendFactor factor) {
switch (factor) {
- case dawn::BlendFactor::Zero:
+ case wgpu::BlendFactor::Zero:
return D3D12_BLEND_ZERO;
- case dawn::BlendFactor::One:
+ case wgpu::BlendFactor::One:
return D3D12_BLEND_ONE;
- case dawn::BlendFactor::SrcColor:
+ case wgpu::BlendFactor::SrcColor:
return D3D12_BLEND_SRC_COLOR;
- case dawn::BlendFactor::OneMinusSrcColor:
+ case wgpu::BlendFactor::OneMinusSrcColor:
return D3D12_BLEND_INV_SRC_COLOR;
- case dawn::BlendFactor::SrcAlpha:
+ case wgpu::BlendFactor::SrcAlpha:
return D3D12_BLEND_SRC_ALPHA;
- case dawn::BlendFactor::OneMinusSrcAlpha:
+ case wgpu::BlendFactor::OneMinusSrcAlpha:
return D3D12_BLEND_INV_SRC_ALPHA;
- case dawn::BlendFactor::DstColor:
+ case wgpu::BlendFactor::DstColor:
return D3D12_BLEND_DEST_COLOR;
- case dawn::BlendFactor::OneMinusDstColor:
+ case wgpu::BlendFactor::OneMinusDstColor:
return D3D12_BLEND_INV_DEST_COLOR;
- case dawn::BlendFactor::DstAlpha:
+ case wgpu::BlendFactor::DstAlpha:
return D3D12_BLEND_DEST_ALPHA;
- case dawn::BlendFactor::OneMinusDstAlpha:
+ case wgpu::BlendFactor::OneMinusDstAlpha:
return D3D12_BLEND_INV_DEST_ALPHA;
- case dawn::BlendFactor::SrcAlphaSaturated:
+ case wgpu::BlendFactor::SrcAlphaSaturated:
return D3D12_BLEND_SRC_ALPHA_SAT;
- case dawn::BlendFactor::BlendColor:
+ case wgpu::BlendFactor::BlendColor:
return D3D12_BLEND_BLEND_FACTOR;
- case dawn::BlendFactor::OneMinusBlendColor:
+ case wgpu::BlendFactor::OneMinusBlendColor:
return D3D12_BLEND_INV_BLEND_FACTOR;
default:
UNREACHABLE();
}
}
- D3D12_BLEND_OP D3D12BlendOperation(dawn::BlendOperation operation) {
+ D3D12_BLEND_OP D3D12BlendOperation(wgpu::BlendOperation operation) {
switch (operation) {
- case dawn::BlendOperation::Add:
+ case wgpu::BlendOperation::Add:
return D3D12_BLEND_OP_ADD;
- case dawn::BlendOperation::Subtract:
+ case wgpu::BlendOperation::Subtract:
return D3D12_BLEND_OP_SUBTRACT;
- case dawn::BlendOperation::ReverseSubtract:
+ case wgpu::BlendOperation::ReverseSubtract:
return D3D12_BLEND_OP_REV_SUBTRACT;
- case dawn::BlendOperation::Min:
+ case wgpu::BlendOperation::Min:
return D3D12_BLEND_OP_MIN;
- case dawn::BlendOperation::Max:
+ case wgpu::BlendOperation::Max:
return D3D12_BLEND_OP_MAX;
default:
UNREACHABLE();
}
}
- uint8_t D3D12RenderTargetWriteMask(dawn::ColorWriteMask writeMask) {
- static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(dawn::ColorWriteMask::Red) ==
+ uint8_t D3D12RenderTargetWriteMask(wgpu::ColorWriteMask writeMask) {
+ static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Red) ==
D3D12_COLOR_WRITE_ENABLE_RED,
"ColorWriteMask values must match");
- static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(dawn::ColorWriteMask::Green) ==
+ static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Green) ==
D3D12_COLOR_WRITE_ENABLE_GREEN,
"ColorWriteMask values must match");
- static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(dawn::ColorWriteMask::Blue) ==
+ static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Blue) ==
D3D12_COLOR_WRITE_ENABLE_BLUE,
"ColorWriteMask values must match");
- static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(dawn::ColorWriteMask::Alpha) ==
+ static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Alpha) ==
D3D12_COLOR_WRITE_ENABLE_ALPHA,
"ColorWriteMask values must match");
return static_cast<uint8_t>(writeMask);
@@ -233,23 +233,23 @@ namespace dawn_native { namespace d3d12 {
return blendDesc;
}
- D3D12_STENCIL_OP StencilOp(dawn::StencilOperation op) {
+ D3D12_STENCIL_OP StencilOp(wgpu::StencilOperation op) {
switch (op) {
- case dawn::StencilOperation::Keep:
+ case wgpu::StencilOperation::Keep:
return D3D12_STENCIL_OP_KEEP;
- case dawn::StencilOperation::Zero:
+ case wgpu::StencilOperation::Zero:
return D3D12_STENCIL_OP_ZERO;
- case dawn::StencilOperation::Replace:
+ case wgpu::StencilOperation::Replace:
return D3D12_STENCIL_OP_REPLACE;
- case dawn::StencilOperation::IncrementClamp:
+ case wgpu::StencilOperation::IncrementClamp:
return D3D12_STENCIL_OP_INCR_SAT;
- case dawn::StencilOperation::DecrementClamp:
+ case wgpu::StencilOperation::DecrementClamp:
return D3D12_STENCIL_OP_DECR_SAT;
- case dawn::StencilOperation::Invert:
+ case wgpu::StencilOperation::Invert:
return D3D12_STENCIL_OP_INVERT;
- case dawn::StencilOperation::IncrementWrap:
+ case wgpu::StencilOperation::IncrementWrap:
return D3D12_STENCIL_OP_INCR;
- case dawn::StencilOperation::DecrementWrap:
+ case wgpu::StencilOperation::DecrementWrap:
return D3D12_STENCIL_OP_DECR;
default:
UNREACHABLE();
@@ -313,9 +313,9 @@ namespace dawn_native { namespace d3d12 {
PerStage<ComPtr<ID3DBlob>> compiledShader;
ComPtr<ID3DBlob> errors;
- dawn::ShaderStage renderStages = dawn::ShaderStage::Vertex | dawn::ShaderStage::Fragment;
+ wgpu::ShaderStage renderStages = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
for (auto stage : IterateStages(renderStages)) {
- const ShaderModule* module = nullptr;
+ ShaderModule* module = nullptr;
const char* entryPoint = nullptr;
const char* compileTarget = nullptr;
D3D12_SHADER_BYTECODE* shader = nullptr;
@@ -359,14 +359,14 @@ namespace dawn_native { namespace d3d12 {
// D3D12 logs warnings if any empty input state is used
std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes> inputElementDescriptors;
- if (GetAttributesSetMask().any()) {
+ if (GetAttributeLocationsUsed().any()) {
descriptorD3D12.InputLayout = ComputeInputLayout(&inputElementDescriptors);
}
descriptorD3D12.RasterizerState.FillMode = D3D12_FILL_MODE_SOLID;
descriptorD3D12.RasterizerState.CullMode = D3D12CullMode(GetCullMode());
descriptorD3D12.RasterizerState.FrontCounterClockwise =
- (GetFrontFace() == dawn::FrontFace::CCW) ? TRUE : FALSE;
+ (GetFrontFace() == wgpu::FrontFace::CCW) ? TRUE : FALSE;
descriptorD3D12.RasterizerState.DepthBias = D3D12_DEFAULT_DEPTH_BIAS;
descriptorD3D12.RasterizerState.DepthBiasClamp = D3D12_DEFAULT_DEPTH_BIAS_CLAMP;
descriptorD3D12.RasterizerState.SlopeScaledDepthBias =
@@ -423,7 +423,7 @@ namespace dawn_native { namespace d3d12 {
D3D12_INPUT_LAYOUT_DESC RenderPipeline::ComputeInputLayout(
std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors) {
unsigned int count = 0;
- for (auto i : IterateBitSet(GetAttributesSetMask())) {
+ for (auto i : IterateBitSet(GetAttributeLocationsUsed())) {
D3D12_INPUT_ELEMENT_DESC& inputElementDescriptor = (*inputElementDescriptors)[count++];
const VertexAttributeInfo& attribute = GetAttribute(i);
@@ -433,9 +433,9 @@ namespace dawn_native { namespace d3d12 {
inputElementDescriptor.SemanticName = "TEXCOORD";
inputElementDescriptor.SemanticIndex = static_cast<uint32_t>(i);
inputElementDescriptor.Format = VertexFormatType(attribute.format);
- inputElementDescriptor.InputSlot = attribute.inputSlot;
+ inputElementDescriptor.InputSlot = attribute.vertexBufferSlot;
- const VertexBufferInfo& input = GetInput(attribute.inputSlot);
+ const VertexBufferInfo& input = GetVertexBuffer(attribute.vertexBufferSlot);
inputElementDescriptor.AlignedByteOffset = attribute.offset;
inputElementDescriptor.InputSlotClass = InputStepModeFunction(input.stepMode);
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocator.cpp
deleted file mode 100644
index e2822e64cf9..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocator.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/d3d12/ResourceAllocator.h"
-
-#include "dawn_native/d3d12/DeviceD3D12.h"
-
-namespace dawn_native { namespace d3d12 {
-
- namespace {
- static constexpr D3D12_HEAP_PROPERTIES kDefaultHeapProperties = {
- D3D12_HEAP_TYPE_DEFAULT, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0,
- 0};
-
- static constexpr D3D12_HEAP_PROPERTIES kUploadHeapProperties = {
- D3D12_HEAP_TYPE_UPLOAD, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0,
- 0};
-
- static constexpr D3D12_HEAP_PROPERTIES kReadbackHeapProperties = {
- D3D12_HEAP_TYPE_READBACK, D3D12_CPU_PAGE_PROPERTY_UNKNOWN, D3D12_MEMORY_POOL_UNKNOWN, 0,
- 0};
- } // namespace
-
- ResourceAllocator::ResourceAllocator(Device* device) : mDevice(device) {
- }
-
- ComPtr<ID3D12Resource> ResourceAllocator::Allocate(
- D3D12_HEAP_TYPE heapType,
- const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage) {
- const D3D12_HEAP_PROPERTIES* heapProperties = nullptr;
- switch (heapType) {
- case D3D12_HEAP_TYPE_DEFAULT:
- heapProperties = &kDefaultHeapProperties;
- break;
- case D3D12_HEAP_TYPE_UPLOAD:
- heapProperties = &kUploadHeapProperties;
- break;
- case D3D12_HEAP_TYPE_READBACK:
- heapProperties = &kReadbackHeapProperties;
- break;
- default:
- UNREACHABLE();
- }
-
- ComPtr<ID3D12Resource> resource;
-
- // TODO(enga@google.com): Use CreatePlacedResource
- ASSERT_SUCCESS(mDevice->GetD3D12Device()->CreateCommittedResource(
- heapProperties, D3D12_HEAP_FLAG_NONE, &resourceDescriptor, initialUsage, nullptr,
- IID_PPV_ARGS(&resource)));
-
- return resource;
- }
-
- void ResourceAllocator::Release(ComPtr<ID3D12Resource> resource) {
- // Resources may still be in use on the GPU. Enqueue them so that we hold onto them until
- // GPU execution has completed
- mReleasedResources.Enqueue(resource, mDevice->GetPendingCommandSerial());
- }
-
- void ResourceAllocator::Tick(uint64_t lastCompletedSerial) {
- mReleasedResources.ClearUpTo(lastCompletedSerial);
- }
-
-}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
index b18c998fb83..5200a145c31 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
@@ -13,56 +13,268 @@
// limitations under the License.
#include "dawn_native/d3d12/ResourceAllocatorManagerD3D12.h"
-#include "dawn_native/d3d12/Forward.h"
+
+#include "dawn_native/d3d12/D3D12Error.h"
+#include "dawn_native/d3d12/DeviceD3D12.h"
+#include "dawn_native/d3d12/HeapAllocatorD3D12.h"
+#include "dawn_native/d3d12/HeapD3D12.h"
namespace dawn_native { namespace d3d12 {
+ namespace {
+ D3D12_HEAP_TYPE GetD3D12HeapType(ResourceHeapKind resourceHeapKind) {
+ switch (resourceHeapKind) {
+ case Readback_OnlyBuffers:
+ case Readback_AllBuffersAndTextures:
+ return D3D12_HEAP_TYPE_READBACK;
+ case Default_AllBuffersAndTextures:
+ case Default_OnlyBuffers:
+ case Default_OnlyNonRenderableOrDepthTextures:
+ case Default_OnlyRenderableOrDepthTextures:
+ return D3D12_HEAP_TYPE_DEFAULT;
+ case Upload_OnlyBuffers:
+ case Upload_AllBuffersAndTextures:
+ return D3D12_HEAP_TYPE_UPLOAD;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ D3D12_HEAP_FLAGS GetD3D12HeapFlags(ResourceHeapKind resourceHeapKind) {
+ switch (resourceHeapKind) {
+ case Default_AllBuffersAndTextures:
+ case Readback_AllBuffersAndTextures:
+ case Upload_AllBuffersAndTextures:
+ return D3D12_HEAP_FLAG_ALLOW_ALL_BUFFERS_AND_TEXTURES;
+ case Default_OnlyBuffers:
+ case Readback_OnlyBuffers:
+ case Upload_OnlyBuffers:
+ return D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS;
+ case Default_OnlyNonRenderableOrDepthTextures:
+ return D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES;
+ case Default_OnlyRenderableOrDepthTextures:
+ return D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ ResourceHeapKind GetResourceHeapKind(D3D12_RESOURCE_DIMENSION dimension,
+ D3D12_HEAP_TYPE heapType,
+ D3D12_RESOURCE_FLAGS flags,
+ uint32_t resourceHeapTier) {
+ if (resourceHeapTier >= 2) {
+ switch (heapType) {
+ case D3D12_HEAP_TYPE_UPLOAD:
+ return Upload_AllBuffersAndTextures;
+ case D3D12_HEAP_TYPE_DEFAULT:
+ return Default_AllBuffersAndTextures;
+ case D3D12_HEAP_TYPE_READBACK:
+ return Readback_AllBuffersAndTextures;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ switch (dimension) {
+ case D3D12_RESOURCE_DIMENSION_BUFFER: {
+ switch (heapType) {
+ case D3D12_HEAP_TYPE_UPLOAD:
+ return Upload_OnlyBuffers;
+ case D3D12_HEAP_TYPE_DEFAULT:
+ return Default_OnlyBuffers;
+ case D3D12_HEAP_TYPE_READBACK:
+ return Readback_OnlyBuffers;
+ default:
+ UNREACHABLE();
+ }
+ } break;
+ case D3D12_RESOURCE_DIMENSION_TEXTURE1D:
+ case D3D12_RESOURCE_DIMENSION_TEXTURE2D:
+ case D3D12_RESOURCE_DIMENSION_TEXTURE3D: {
+ switch (heapType) {
+ case D3D12_HEAP_TYPE_DEFAULT: {
+ if ((flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
+ (flags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET)) {
+ return Default_OnlyRenderableOrDepthTextures;
+ } else {
+ return Default_OnlyNonRenderableOrDepthTextures;
+ }
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ } // namespace
ResourceAllocatorManager::ResourceAllocatorManager(Device* device) : mDevice(device) {
+ mResourceHeapTier = (mDevice->IsToggleEnabled(Toggle::UseD3D12ResourceHeapTier2))
+ ? mDevice->GetDeviceInfo().resourceHeapTier
+ : 1;
+
+ for (uint32_t i = 0; i < ResourceHeapKind::EnumCount; i++) {
+ const ResourceHeapKind resourceHeapKind = static_cast<ResourceHeapKind>(i);
+ mHeapAllocators[i] = std::make_unique<HeapAllocator>(
+ mDevice, GetD3D12HeapType(resourceHeapKind), GetD3D12HeapFlags(resourceHeapKind));
+ mSubAllocatedResourceAllocators[i] = std::make_unique<BuddyMemoryAllocator>(
+ kMaxHeapSize, kMinHeapSize, mHeapAllocators[i].get());
+ }
}
ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::AllocateMemory(
D3D12_HEAP_TYPE heapType,
const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage,
- D3D12_HEAP_FLAGS heapFlags) {
- const size_t heapTypeIndex = GetD3D12HeapTypeToIndex(heapType);
- ASSERT(heapTypeIndex < kNumHeapTypes);
-
- // Get the direct allocator using a tightly sized heap (aka CreateCommittedResource).
- CommittedResourceAllocator* allocator = mDirectResourceAllocators[heapTypeIndex].get();
- if (allocator == nullptr) {
- mDirectResourceAllocators[heapTypeIndex] =
- std::make_unique<CommittedResourceAllocator>(mDevice, heapType);
- allocator = mDirectResourceAllocators[heapTypeIndex].get();
+ D3D12_RESOURCE_STATES initialUsage) {
+ // TODO(bryan.bernhart@intel.com): Conditionally disable sub-allocation.
+ // For very large resources, there is no benefit to suballocate.
+ // For very small resources, it is inefficent to suballocate given the min. heap
+ // size could be much larger then the resource allocation.
+ // Attempt to satisfy the request using sub-allocation (placed resource in a heap).
+ ResourceHeapAllocation subAllocation;
+ DAWN_TRY_ASSIGN(subAllocation,
+ CreatePlacedResource(heapType, resourceDescriptor, initialUsage));
+ if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+ return subAllocation;
}
- ResourceHeapAllocation allocation;
- DAWN_TRY_ASSIGN(allocation,
- allocator->Allocate(resourceDescriptor, initialUsage, heapFlags));
+ // If sub-allocation fails, fall-back to direct allocation (committed resource).
+ ResourceHeapAllocation directAllocation;
+ DAWN_TRY_ASSIGN(directAllocation,
+ CreateCommittedResource(heapType, resourceDescriptor, initialUsage));
- return allocation;
+ return directAllocation;
}
- size_t ResourceAllocatorManager::GetD3D12HeapTypeToIndex(D3D12_HEAP_TYPE heapType) const {
- ASSERT(heapType > 0);
- ASSERT(static_cast<uint32_t>(heapType) <= kNumHeapTypes);
- return heapType - 1;
+ void ResourceAllocatorManager::Tick(Serial completedSerial) {
+ for (ResourceHeapAllocation& allocation :
+ mAllocationsToDelete.IterateUpTo(completedSerial)) {
+ if (allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated) {
+ FreeMemory(allocation);
+ }
+ }
+ mAllocationsToDelete.ClearUpTo(completedSerial);
}
void ResourceAllocatorManager::DeallocateMemory(ResourceHeapAllocation& allocation) {
if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
return;
}
- CommittedResourceAllocator* allocator = nullptr;
- D3D12_HEAP_PROPERTIES heapProp;
- allocation.GetD3D12Resource()->GetHeapProperties(&heapProp, nullptr);
- const size_t heapTypeIndex = GetD3D12HeapTypeToIndex(heapProp.Type);
- ASSERT(heapTypeIndex < kNumHeapTypes);
- allocator = mDirectResourceAllocators[heapTypeIndex].get();
- allocator->Deallocate(allocation);
- // Invalidate the underlying resource heap in case the client accidentally
+ mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
+
+ // Invalidate the allocation immediately in case one accidentally
// calls DeallocateMemory again using the same allocation.
allocation.Invalidate();
+
+ ASSERT(allocation.GetD3D12Resource().Get() == nullptr);
+ }
+
+ void ResourceAllocatorManager::FreeMemory(ResourceHeapAllocation& allocation) {
+ ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
+
+ D3D12_HEAP_PROPERTIES heapProp;
+ allocation.GetD3D12Resource()->GetHeapProperties(&heapProp, nullptr);
+
+ const D3D12_RESOURCE_DESC resourceDescriptor = allocation.GetD3D12Resource()->GetDesc();
+
+ const size_t resourceHeapKindIndex =
+ GetResourceHeapKind(resourceDescriptor.Dimension, heapProp.Type,
+ resourceDescriptor.Flags, mResourceHeapTier);
+
+ mSubAllocatedResourceAllocators[resourceHeapKindIndex]->Deallocate(allocation);
}
+
+ ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreatePlacedResource(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
+ D3D12_RESOURCE_STATES initialUsage) {
+ const size_t resourceHeapKindIndex =
+ GetResourceHeapKind(requestedResourceDescriptor.Dimension, heapType,
+ requestedResourceDescriptor.Flags, mResourceHeapTier);
+
+ // Small resources can take advantage of smaller alignments. For example,
+ // if the most detailed mip can fit under 64KB, 4KB alignments can be used.
+ // Must be non-depth or without render-target to use small resource alignment.
+ //
+ // Note: Only known to be used for small textures; however, MSDN suggests
+ // it could be extended for more cases. If so, this could default to always attempt small
+ // resource placement.
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_resource_desc
+ D3D12_RESOURCE_DESC resourceDescriptor = requestedResourceDescriptor;
+ resourceDescriptor.Alignment =
+ (resourceHeapKindIndex == Default_OnlyNonRenderableOrDepthTextures)
+ ? D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT
+ : requestedResourceDescriptor.Alignment;
+
+ D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
+ mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
+
+ // If the request for small resource alignment was rejected, let D3D tell us what the
+ // required alignment is for this resource.
+ if (resourceHeapKindIndex == Default_OnlyNonRenderableOrDepthTextures &&
+ resourceInfo.Alignment != D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT) {
+ resourceDescriptor.Alignment = 0;
+ resourceInfo =
+ mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
+ }
+
+ BuddyMemoryAllocator* allocator =
+ mSubAllocatedResourceAllocators[resourceHeapKindIndex].get();
+
+ ResourceMemoryAllocation allocation;
+ DAWN_TRY_ASSIGN(allocation,
+ allocator->Allocate(resourceInfo.SizeInBytes, resourceInfo.Alignment));
+ if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
+ return ResourceHeapAllocation{}; // invalid
+ }
+
+ ID3D12Heap* heap = static_cast<Heap*>(allocation.GetResourceHeap())->GetD3D12Heap().Get();
+
+ // With placed resources, a single heap can be reused.
+ // The resource placed at an offset is only reclaimed
+ // upon Tick or after the last command list using the resource has completed
+ // on the GPU. This means the same physical memory is not reused
+ // within the same command-list and does not require additional synchronization (aliasing
+ // barrier).
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
+ ComPtr<ID3D12Resource> placedResource;
+ DAWN_TRY(CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreatePlacedResource(
+ heap, allocation.GetOffset(), &resourceDescriptor,
+ initialUsage, nullptr, IID_PPV_ARGS(&placedResource)),
+ "ID3D12Device::CreatePlacedResource"));
+
+ return ResourceHeapAllocation{allocation.GetInfo(), allocation.GetOffset(),
+ std::move(placedResource)};
+ }
+
+ ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreateCommittedResource(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& resourceDescriptor,
+ D3D12_RESOURCE_STATES initialUsage) {
+ D3D12_HEAP_PROPERTIES heapProperties;
+ heapProperties.Type = heapType;
+ heapProperties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
+ heapProperties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
+ heapProperties.CreationNodeMask = 0;
+ heapProperties.VisibleNodeMask = 0;
+
+ // Note: Heap flags are inferred by the resource descriptor and do not need to be explicitly
+ // provided to CreateCommittedResource.
+ ComPtr<ID3D12Resource> committedResource;
+ DAWN_TRY(
+ CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreateCommittedResource(
+ &heapProperties, D3D12_HEAP_FLAG_NONE, &resourceDescriptor,
+ initialUsage, nullptr, IID_PPV_ARGS(&committedResource)),
+ "ID3D12Device::CreateCommittedResource"));
+
+ AllocationInfo info;
+ info.mMethod = AllocationMethod::kDirect;
+
+ return ResourceHeapAllocation{info,
+ /*offset*/ 0, std::move(committedResource)};
+ }
+
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h
index d8f1cdb30ee..b60a024b27c 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h
@@ -15,7 +15,10 @@
#ifndef DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
#define DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
-#include "dawn_native/d3d12/CommittedResourceAllocatorD3D12.h"
+#include "common/SerialQueue.h"
+#include "dawn_native/BuddyMemoryAllocator.h"
+#include "dawn_native/d3d12/HeapAllocatorD3D12.h"
+#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
#include <array>
@@ -23,8 +26,34 @@ namespace dawn_native { namespace d3d12 {
class Device;
- // Manages a list of resource allocators used by the device to create resources using multiple
- // allocation methods.
+ // Resource heap types + flags combinations are named after the D3D constants.
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_flags
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_type
+ enum ResourceHeapKind {
+
+ // Resource heap tier 2
+ // Allows resource heaps to contain all buffer and textures types.
+ // This enables better heap re-use by avoiding the need for separate heaps and
+ // also reduces fragmentation.
+ Readback_AllBuffersAndTextures,
+ Upload_AllBuffersAndTextures,
+ Default_AllBuffersAndTextures,
+
+ // Resource heap tier 1
+ // Resource heaps only support types from a single resource category.
+ Readback_OnlyBuffers,
+ Upload_OnlyBuffers,
+ Default_OnlyBuffers,
+
+ Default_OnlyNonRenderableOrDepthTextures,
+ Default_OnlyRenderableOrDepthTextures,
+
+ EnumCount,
+ InvalidEnum = EnumCount,
+ };
+
+ // Manages a list of resource allocators used by the device to create resources using
+ // multiple allocation methods.
class ResourceAllocatorManager {
public:
ResourceAllocatorManager(Device* device);
@@ -32,31 +61,38 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<ResourceHeapAllocation> AllocateMemory(
D3D12_HEAP_TYPE heapType,
const D3D12_RESOURCE_DESC& resourceDescriptor,
- D3D12_RESOURCE_STATES initialUsage,
- D3D12_HEAP_FLAGS heapFlags);
+ D3D12_RESOURCE_STATES initialUsage);
void DeallocateMemory(ResourceHeapAllocation& allocation);
+ void Tick(Serial lastCompletedSerial);
+
private:
- size_t GetD3D12HeapTypeToIndex(D3D12_HEAP_TYPE heapType) const;
+ void FreeMemory(ResourceHeapAllocation& allocation);
+
+ ResultOrError<ResourceHeapAllocation> CreatePlacedResource(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
+ D3D12_RESOURCE_STATES initialUsage);
+
+ ResultOrError<ResourceHeapAllocation> CreateCommittedResource(
+ D3D12_HEAP_TYPE heapType,
+ const D3D12_RESOURCE_DESC& resourceDescriptor,
+ D3D12_RESOURCE_STATES initialUsage);
Device* mDevice;
+ uint32_t mResourceHeapTier;
- static constexpr uint32_t kNumHeapTypes = 4u; // Number of D3D12_HEAP_TYPE
+ static constexpr uint64_t kMaxHeapSize = 32ll * 1024ll * 1024ll * 1024ll; // 32GB
+ static constexpr uint64_t kMinHeapSize = 4ll * 1024ll * 1024ll; // 4MB
- static_assert(D3D12_HEAP_TYPE_READBACK <= kNumHeapTypes,
- "Readback heap type enum exceeds max heap types");
- static_assert(D3D12_HEAP_TYPE_UPLOAD <= kNumHeapTypes,
- "Upload heap type enum exceeds max heap types");
- static_assert(D3D12_HEAP_TYPE_DEFAULT <= kNumHeapTypes,
- "Default heap type enum exceeds max heap types");
- static_assert(D3D12_HEAP_TYPE_CUSTOM <= kNumHeapTypes,
- "Custom heap type enum exceeds max heap types");
+ std::array<std::unique_ptr<BuddyMemoryAllocator>, ResourceHeapKind::EnumCount>
+ mSubAllocatedResourceAllocators;
+ std::array<std::unique_ptr<HeapAllocator>, ResourceHeapKind::EnumCount> mHeapAllocators;
- std::array<std::unique_ptr<CommittedResourceAllocator>, kNumHeapTypes>
- mDirectResourceAllocators;
+ SerialQueue<ResourceHeapAllocation> mAllocationsToDelete;
};
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_ \ No newline at end of file
+#endif // DAWNNATIVE_D3D12_RESOURCEALLOCATORMANAGERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
index 158e8900d19..bf805cb8d0d 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
@@ -23,6 +23,11 @@ namespace dawn_native { namespace d3d12 {
: ResourceMemoryAllocation(info, offset, nullptr), mResource(std::move(resource)) {
}
+ void ResourceHeapAllocation::Invalidate() {
+ ResourceMemoryAllocation::Invalidate();
+ mResource.Reset();
+ }
+
ComPtr<ID3D12Resource> ResourceHeapAllocation::GetD3D12Resource() const {
return mResource;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
index 8230857dbfb..d764a9675f2 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
@@ -26,7 +26,9 @@ namespace dawn_native { namespace d3d12 {
ResourceHeapAllocation(const AllocationInfo& info,
uint64_t offset,
ComPtr<ID3D12Resource> resource);
- ~ResourceHeapAllocation() = default;
+ ~ResourceHeapAllocation() override = default;
+
+ void Invalidate() override;
ComPtr<ID3D12Resource> GetD3D12Resource() const;
D3D12_GPU_VIRTUAL_ADDRESS GetGPUPointer() const;
@@ -34,6 +36,7 @@ namespace dawn_native { namespace d3d12 {
private:
ComPtr<ID3D12Resource> mResource;
};
+
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_RESOURCEHEAPALLOCATIOND3D12_H_ \ No newline at end of file
+#endif // DAWNNATIVE_D3D12_RESOURCEHEAPALLOCATIOND3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.cpp
index a2d5b5f6313..ecea147ba9e 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerD3D12.cpp
@@ -20,13 +20,13 @@
namespace dawn_native { namespace d3d12 {
namespace {
- D3D12_TEXTURE_ADDRESS_MODE AddressMode(dawn::AddressMode mode) {
+ D3D12_TEXTURE_ADDRESS_MODE AddressMode(wgpu::AddressMode mode) {
switch (mode) {
- case dawn::AddressMode::Repeat:
+ case wgpu::AddressMode::Repeat:
return D3D12_TEXTURE_ADDRESS_MODE_WRAP;
- case dawn::AddressMode::MirrorRepeat:
+ case wgpu::AddressMode::MirrorRepeat:
return D3D12_TEXTURE_ADDRESS_MODE_MIRROR;
- case dawn::AddressMode::ClampToEdge:
+ case wgpu::AddressMode::ClampToEdge:
return D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
default:
UNREACHABLE();
@@ -54,25 +54,25 @@ namespace dawn_native { namespace d3d12 {
uint8_t mode = 0;
switch (descriptor->minFilter) {
- case dawn::FilterMode::Nearest:
+ case wgpu::FilterMode::Nearest:
break;
- case dawn::FilterMode::Linear:
+ case wgpu::FilterMode::Linear:
mode += 16;
break;
}
switch (descriptor->magFilter) {
- case dawn::FilterMode::Nearest:
+ case wgpu::FilterMode::Nearest:
break;
- case dawn::FilterMode::Linear:
+ case wgpu::FilterMode::Linear:
mode += 4;
break;
}
switch (descriptor->mipmapFilter) {
- case dawn::FilterMode::Nearest:
+ case wgpu::FilterMode::Nearest:
break;
- case dawn::FilterMode::Linear:
+ case wgpu::FilterMode::Linear:
mode += 1;
break;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
index 872e3971ecb..3f841d1c547 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
@@ -24,31 +24,76 @@
namespace dawn_native { namespace d3d12 {
+ // static
+ ResultOrError<ShaderModule*> ShaderModule::Create(Device* device,
+ const ShaderModuleDescriptor* descriptor) {
+ std::unique_ptr<ShaderModule> module(new ShaderModule(device, descriptor));
+ if (!module)
+ return DAWN_VALIDATION_ERROR("Unable to create ShaderModule");
+ DAWN_TRY(module->Initialize(descriptor));
+ return module.release();
+ }
+
ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
: ShaderModuleBase(device, descriptor) {
+ }
+
+ MaybeError ShaderModule::Initialize(const ShaderModuleDescriptor* descriptor) {
mSpirv.assign(descriptor->code, descriptor->code + descriptor->codeSize);
- spirv_cross::CompilerHLSL compiler(mSpirv);
- ExtractSpirvInfo(compiler);
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ shaderc_spvc::CompileOptions options;
+
+ options.SetHLSLShaderModel(51);
+ // PointCoord and PointSize are not supported in HLSL
+ // TODO (hao.x.li@intel.com): The point_coord_compat and point_size_compat are
+ // required temporarily for https://bugs.chromium.org/p/dawn/issues/detail?id=146,
+ // but should be removed once WebGPU requires there is no gl_PointSize builtin.
+ // See https://github.com/gpuweb/gpuweb/issues/332
+ options.SetHLSLPointCoordCompat(true);
+ options.SetHLSLPointSizeCompat(true);
+
+ shaderc_spvc_status status =
+ mSpvcContext.InitializeForHlsl(descriptor->code, descriptor->codeSize, options);
+ if (status != shaderc_spvc_status_success) {
+ return DAWN_VALIDATION_ERROR("Unable to initialize instance of spvc");
+ }
+
+ spirv_cross::Compiler* compiler =
+ reinterpret_cast<spirv_cross::Compiler*>(mSpvcContext.GetCompiler());
+ ExtractSpirvInfo(*compiler);
+ } else {
+ spirv_cross::CompilerHLSL compiler(descriptor->code, descriptor->codeSize);
+ ExtractSpirvInfo(compiler);
+ }
+ return {};
}
- const std::string ShaderModule::GetHLSLSource(PipelineLayout* layout) const {
- spirv_cross::CompilerHLSL compiler(mSpirv);
+ const std::string ShaderModule::GetHLSLSource(PipelineLayout* layout) {
+ std::unique_ptr<spirv_cross::CompilerHLSL> compiler_impl;
+ spirv_cross::CompilerHLSL* compiler;
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ compiler = reinterpret_cast<spirv_cross::CompilerHLSL*>(mSpvcContext.GetCompiler());
+ // TODO(rharrison): Check status & have some sort of meaningful error path
+ } else {
+ // If these options are changed, the values in DawnSPIRVCrossHLSLFastFuzzer.cpp need to
+ // be updated.
+ spirv_cross::CompilerGLSL::Options options_glsl;
- // If these options are changed, the values in DawnSPIRVCrossHLSLFastFuzzer.cpp need to be
- // updated.
- spirv_cross::CompilerGLSL::Options options_glsl;
- compiler.set_common_options(options_glsl);
+ spirv_cross::CompilerHLSL::Options options_hlsl;
+ options_hlsl.shader_model = 51;
+ // PointCoord and PointSize are not supported in HLSL
+ // TODO (hao.x.li@intel.com): The point_coord_compat and point_size_compat are
+ // required temporarily for https://bugs.chromium.org/p/dawn/issues/detail?id=146,
+ // but should be removed once WebGPU requires there is no gl_PointSize builtin.
+ // See https://github.com/gpuweb/gpuweb/issues/332
+ options_hlsl.point_coord_compat = true;
+ options_hlsl.point_size_compat = true;
- spirv_cross::CompilerHLSL::Options options_hlsl;
- options_hlsl.shader_model = 51;
- // PointCoord and PointSize are not supported in HLSL
- // TODO (hao.x.li@intel.com): The point_coord_compat and point_size_compat are
- // required temporarily for https://bugs.chromium.org/p/dawn/issues/detail?id=146,
- // but should be removed once WebGPU requires there is no gl_PointSize builtin.
- // See https://github.com/gpuweb/gpuweb/issues/332
- options_hlsl.point_coord_compat = true;
- options_hlsl.point_size_compat = true;
- compiler.set_hlsl_options(options_hlsl);
+ compiler_impl = std::make_unique<spirv_cross::CompilerHLSL>(mSpirv);
+ compiler = compiler_impl.get();
+ compiler->set_common_options(options_glsl);
+ compiler->set_hlsl_options(options_hlsl);
+ }
const ModuleBindingInfo& moduleBindingInfo = GetBindingInfo();
for (uint32_t group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
@@ -59,11 +104,18 @@ namespace dawn_native { namespace d3d12 {
const BindingInfo& bindingInfo = groupBindingInfo[binding];
if (bindingInfo.used) {
uint32_t bindingOffset = bindingOffsets[binding];
- compiler.set_decoration(bindingInfo.id, spv::DecorationBinding, bindingOffset);
+ compiler->set_decoration(bindingInfo.id, spv::DecorationBinding, bindingOffset);
}
}
}
- return compiler.compile();
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ shaderc_spvc::CompilationResult result;
+ mSpvcContext.CompileShader(&result);
+ // TODO(rharrison): Check status & have some sort of meaningful error path
+ return result.GetStringOutput();
+ } else {
+ return compiler->compile();
+ }
}
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
index 7cafd1cb218..bcec904779a 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.h
@@ -24,11 +24,15 @@ namespace dawn_native { namespace d3d12 {
class ShaderModule : public ShaderModuleBase {
public:
- ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+ static ResultOrError<ShaderModule*> Create(Device* device,
+ const ShaderModuleDescriptor* descriptor);
- const std::string GetHLSLSource(PipelineLayout* layout) const;
+ const std::string GetHLSLSource(PipelineLayout* layout);
private:
+ ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+ MaybeError Initialize(const ShaderModuleDescriptor* descriptor);
+
std::vector<uint32_t> mSpirv;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
index 9e6c2bd7c6c..c2b2cc1f3cd 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
@@ -13,6 +13,7 @@
// limitations under the License.
#include "dawn_native/d3d12/StagingBufferD3D12.h"
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
namespace dawn_native { namespace d3d12 {
@@ -35,18 +36,19 @@ namespace dawn_native { namespace d3d12 {
resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
resourceDescriptor.Flags = D3D12_RESOURCE_FLAG_NONE;
- DAWN_TRY_ASSIGN(mUploadHeap, mDevice->AllocateMemory(
- D3D12_HEAP_TYPE_UPLOAD, resourceDescriptor,
- D3D12_RESOURCE_STATE_GENERIC_READ, D3D12_HEAP_FLAG_NONE));
+ DAWN_TRY_ASSIGN(mUploadHeap,
+ mDevice->AllocateMemory(D3D12_HEAP_TYPE_UPLOAD, resourceDescriptor,
+ D3D12_RESOURCE_STATE_GENERIC_READ));
- if (FAILED(GetResource()->Map(0, nullptr, &mMappedPointer))) {
- return DAWN_DEVICE_LOST_ERROR("Unable to map staging buffer.");
- }
-
- return {};
+ return CheckHRESULT(GetResource()->Map(0, nullptr, &mMappedPointer), "ID3D12Resource::Map");
}
StagingBuffer::~StagingBuffer() {
+ // Always check if the allocation is valid before Unmap.
+ // The resource would not exist had it failed to allocate.
+ if (mUploadHeap.GetInfo().mMethod == AllocationMethod::kInvalid) {
+ return;
+ }
// Invalidate the CPU virtual address & flush cache (if needed).
GetResource()->Unmap(0, nullptr);
mMappedPointer = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
index 7d24b35b551..aec8a61c288 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.cpp
@@ -25,11 +25,11 @@ namespace dawn_native { namespace d3d12 {
: SwapChainBase(device, descriptor) {
const auto& im = GetImplementation();
DawnWSIContextD3D12 wsiContext = {};
- wsiContext.device = reinterpret_cast<DawnDevice>(GetDevice());
+ wsiContext.device = reinterpret_cast<WGPUDevice>(GetDevice());
im.Init(im.userData, &wsiContext);
- ASSERT(im.textureUsage != DAWN_TEXTURE_USAGE_NONE);
- mTextureUsage = static_cast<dawn::TextureUsage>(im.textureUsage);
+ ASSERT(im.textureUsage != WGPUTextureUsage_None);
+ mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
}
SwapChain::~SwapChain() {
@@ -40,12 +40,12 @@ namespace dawn_native { namespace d3d12 {
DawnSwapChainNextTexture next = {};
DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
if (error) {
- GetDevice()->HandleError(dawn::ErrorType::Unknown, error);
+ GetDevice()->HandleError(wgpu::ErrorType::Unknown, error);
return nullptr;
}
- ID3D12Resource* nativeTexture = static_cast<ID3D12Resource*>(next.texture.ptr);
- return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture);
+ ComPtr<ID3D12Resource> d3d12Texture = static_cast<ID3D12Resource*>(next.texture.ptr);
+ return new Texture(ToBackend(GetDevice()), descriptor, std::move(d3d12Texture));
}
MaybeError SwapChain::OnBeforePresent(TextureBase* texture) {
@@ -57,7 +57,7 @@ namespace dawn_native { namespace d3d12 {
// Perform the necessary transition for the texture to be presented.
ToBackend(texture)->TransitionUsageNow(commandContext, mTextureUsage);
- DAWN_TRY(device->ExecuteCommandContext(nullptr));
+ DAWN_TRY(device->ExecutePendingCommandContext());
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
index 151994c9b3e..4b83ce4d09b 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SwapChainD3D12.h
@@ -30,7 +30,7 @@ namespace dawn_native { namespace d3d12 {
TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
MaybeError OnBeforePresent(TextureBase* texture) override;
- dawn::TextureUsage mTextureUsage;
+ wgpu::TextureUsage mTextureUsage;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
index a26c94843a0..f37dc76731c 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
@@ -19,9 +19,10 @@
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/Error.h"
#include "dawn_native/d3d12/BufferD3D12.h"
+#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DescriptorHeapAllocator.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
-#include "dawn_native/d3d12/ResourceAllocator.h"
+#include "dawn_native/d3d12/ResourceAllocatorManagerD3D12.h"
#include "dawn_native/d3d12/StagingBufferD3D12.h"
#include "dawn_native/d3d12/TextureCopySplitter.h"
#include "dawn_native/d3d12/UtilsD3D12.h"
@@ -29,28 +30,28 @@
namespace dawn_native { namespace d3d12 {
namespace {
- D3D12_RESOURCE_STATES D3D12TextureUsage(dawn::TextureUsage usage, const Format& format) {
+ D3D12_RESOURCE_STATES D3D12TextureUsage(wgpu::TextureUsage usage, const Format& format) {
D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
// Present is an exclusive flag.
- if (usage & dawn::TextureUsage::Present) {
+ if (usage & wgpu::TextureUsage::Present) {
return D3D12_RESOURCE_STATE_PRESENT;
}
- if (usage & dawn::TextureUsage::CopySrc) {
+ if (usage & wgpu::TextureUsage::CopySrc) {
resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
}
- if (usage & dawn::TextureUsage::CopyDst) {
+ if (usage & wgpu::TextureUsage::CopyDst) {
resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
}
- if (usage & dawn::TextureUsage::Sampled) {
+ if (usage & wgpu::TextureUsage::Sampled) {
resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
}
- if (usage & dawn::TextureUsage::Storage) {
+ if (usage & wgpu::TextureUsage::Storage) {
resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
}
- if (usage & dawn::TextureUsage::OutputAttachment) {
+ if (usage & wgpu::TextureUsage::OutputAttachment) {
if (format.HasDepthOrStencil()) {
resourceState |= D3D12_RESOURCE_STATE_DEPTH_WRITE;
} else {
@@ -61,12 +62,12 @@ namespace dawn_native { namespace d3d12 {
return resourceState;
}
- D3D12_RESOURCE_FLAGS D3D12ResourceFlags(dawn::TextureUsage usage,
+ D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::TextureUsage usage,
const Format& format,
bool isMultisampledTexture) {
D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
- if (usage & dawn::TextureUsage::Storage) {
+ if (usage & wgpu::TextureUsage::Storage) {
flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
}
@@ -78,7 +79,7 @@ namespace dawn_native { namespace d3d12 {
// flag is invalid.
// TODO(natlee@microsoft.com, jiawei.shao@intel.com): do not require render target for
// lazy clearing.
- if ((usage & dawn::TextureUsage::OutputAttachment) || isMultisampledTexture ||
+ if ((usage & wgpu::TextureUsage::OutputAttachment) || isMultisampledTexture ||
!format.isCompressed) {
if (format.HasDepthOrStencil()) {
flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
@@ -92,9 +93,9 @@ namespace dawn_native { namespace d3d12 {
return flags;
}
- D3D12_RESOURCE_DIMENSION D3D12TextureDimension(dawn::TextureDimension dimension) {
+ D3D12_RESOURCE_DIMENSION D3D12TextureDimension(wgpu::TextureDimension dimension) {
switch (dimension) {
- case dawn::TextureDimension::e2D:
+ case wgpu::TextureDimension::e2D:
return D3D12_RESOURCE_DIMENSION_TEXTURE2D;
default:
UNREACHABLE();
@@ -102,117 +103,117 @@ namespace dawn_native { namespace d3d12 {
}
} // namespace
- DXGI_FORMAT D3D12TextureFormat(dawn::TextureFormat format) {
+ DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format) {
switch (format) {
- case dawn::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Unorm:
return DXGI_FORMAT_R8_UNORM;
- case dawn::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R8Snorm:
return DXGI_FORMAT_R8_SNORM;
- case dawn::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Uint:
return DXGI_FORMAT_R8_UINT;
- case dawn::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::R8Sint:
return DXGI_FORMAT_R8_SINT;
- case dawn::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Uint:
return DXGI_FORMAT_R16_UINT;
- case dawn::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Sint:
return DXGI_FORMAT_R16_SINT;
- case dawn::TextureFormat::R16Float:
+ case wgpu::TextureFormat::R16Float:
return DXGI_FORMAT_R16_FLOAT;
- case dawn::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Unorm:
return DXGI_FORMAT_R8G8_UNORM;
- case dawn::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RG8Snorm:
return DXGI_FORMAT_R8G8_SNORM;
- case dawn::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Uint:
return DXGI_FORMAT_R8G8_UINT;
- case dawn::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::RG8Sint:
return DXGI_FORMAT_R8G8_SINT;
- case dawn::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Uint:
return DXGI_FORMAT_R32_UINT;
- case dawn::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::R32Sint:
return DXGI_FORMAT_R32_SINT;
- case dawn::TextureFormat::R32Float:
+ case wgpu::TextureFormat::R32Float:
return DXGI_FORMAT_R32_FLOAT;
- case dawn::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Uint:
return DXGI_FORMAT_R16G16_UINT;
- case dawn::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Sint:
return DXGI_FORMAT_R16G16_SINT;
- case dawn::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RG16Float:
return DXGI_FORMAT_R16G16_FLOAT;
- case dawn::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8Unorm:
return DXGI_FORMAT_R8G8B8A8_UNORM;
- case dawn::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
return DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
- case dawn::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Snorm:
return DXGI_FORMAT_R8G8B8A8_SNORM;
- case dawn::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Uint:
return DXGI_FORMAT_R8G8B8A8_UINT;
- case dawn::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::RGBA8Sint:
return DXGI_FORMAT_R8G8B8A8_SINT;
- case dawn::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8Unorm:
return DXGI_FORMAT_B8G8R8A8_UNORM;
- case dawn::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
return DXGI_FORMAT_B8G8R8A8_UNORM_SRGB;
- case dawn::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RGB10A2Unorm:
return DXGI_FORMAT_R10G10B10A2_UNORM;
- case dawn::TextureFormat::RG11B10Float:
+ case wgpu::TextureFormat::RG11B10Float:
return DXGI_FORMAT_R11G11B10_FLOAT;
- case dawn::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Uint:
return DXGI_FORMAT_R32G32_UINT;
- case dawn::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RG32Sint:
return DXGI_FORMAT_R32G32_SINT;
- case dawn::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RG32Float:
return DXGI_FORMAT_R32G32_FLOAT;
- case dawn::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Uint:
return DXGI_FORMAT_R16G16B16A16_UINT;
- case dawn::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Sint:
return DXGI_FORMAT_R16G16B16A16_SINT;
- case dawn::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA16Float:
return DXGI_FORMAT_R16G16B16A16_FLOAT;
- case dawn::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Uint:
return DXGI_FORMAT_R32G32B32A32_UINT;
- case dawn::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::RGBA32Sint:
return DXGI_FORMAT_R32G32B32A32_SINT;
- case dawn::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGBA32Float:
return DXGI_FORMAT_R32G32B32A32_FLOAT;
- case dawn::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth32Float:
return DXGI_FORMAT_D32_FLOAT;
- case dawn::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24Plus:
return DXGI_FORMAT_D32_FLOAT;
- case dawn::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
return DXGI_FORMAT_D32_FLOAT_S8X24_UINT;
- case dawn::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnorm:
return DXGI_FORMAT_BC1_UNORM;
- case dawn::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
return DXGI_FORMAT_BC1_UNORM_SRGB;
- case dawn::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
return DXGI_FORMAT_BC2_UNORM;
- case dawn::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
return DXGI_FORMAT_BC2_UNORM_SRGB;
- case dawn::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
return DXGI_FORMAT_BC3_UNORM;
- case dawn::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
return DXGI_FORMAT_BC3_UNORM_SRGB;
- case dawn::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
return DXGI_FORMAT_BC4_SNORM;
- case dawn::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RUnorm:
return DXGI_FORMAT_BC4_UNORM;
- case dawn::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
return DXGI_FORMAT_BC5_SNORM;
- case dawn::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGUnorm:
return DXGI_FORMAT_BC5_UNORM;
- case dawn::TextureFormat::BC6HRGBSfloat:
+ case wgpu::TextureFormat::BC6HRGBSfloat:
return DXGI_FORMAT_BC6H_SF16;
- case dawn::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
return DXGI_FORMAT_BC6H_UF16;
- case dawn::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
return DXGI_FORMAT_BC7_UNORM;
- case dawn::TextureFormat::BC7RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
return DXGI_FORMAT_BC7_UNORM_SRGB;
default:
@@ -221,7 +222,7 @@ namespace dawn_native { namespace d3d12 {
}
MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor) {
- if (descriptor->dimension != dawn::TextureDimension::e2D) {
+ if (descriptor->dimension != wgpu::TextureDimension::e2D) {
return DAWN_VALIDATION_ERROR("Texture must be 2D");
}
@@ -272,13 +273,55 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<TextureBase*> Texture::Create(Device* device,
const TextureDescriptor* descriptor) {
- Ref<Texture> dawnTexture = AcquireRef(new Texture(device, descriptor));
+ Ref<Texture> dawnTexture =
+ AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
DAWN_TRY(dawnTexture->InitializeAsInternalTexture());
return dawnTexture.Detach();
}
- Texture::Texture(Device* device, const TextureDescriptor* descriptor)
- : TextureBase(device, descriptor, TextureState::OwnedInternal) {
+ ResultOrError<TextureBase*> Texture::Create(Device* device,
+ const TextureDescriptor* descriptor,
+ HANDLE sharedHandle,
+ uint64_t acquireMutexKey) {
+ Ref<Texture> dawnTexture =
+ AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
+ DAWN_TRY(
+ dawnTexture->InitializeAsExternalTexture(descriptor, sharedHandle, acquireMutexKey));
+ return dawnTexture.Detach();
+ }
+
+ MaybeError Texture::InitializeAsExternalTexture(const TextureDescriptor* descriptor,
+ HANDLE sharedHandle,
+ uint64_t acquireMutexKey) {
+ Device* dawnDevice = ToBackend(GetDevice());
+ DAWN_TRY(ValidateTextureDescriptor(dawnDevice, descriptor));
+ DAWN_TRY(ValidateTextureDescriptorCanBeWrapped(descriptor));
+
+ ComPtr<ID3D12Resource> d3d12Resource;
+ DAWN_TRY(CheckHRESULT(dawnDevice->GetD3D12Device()->OpenSharedHandle(
+ sharedHandle, IID_PPV_ARGS(&d3d12Resource)),
+ "D3D12 opening shared handle"));
+
+ DAWN_TRY(ValidateD3D12TextureCanBeWrapped(d3d12Resource.Get(), descriptor));
+
+ ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
+ DAWN_TRY_ASSIGN(dxgiKeyedMutex,
+ dawnDevice->CreateKeyedMutexForTexture(d3d12Resource.Get()));
+
+ DAWN_TRY(CheckHRESULT(dxgiKeyedMutex->AcquireSync(acquireMutexKey, INFINITE),
+ "D3D12 acquiring shared mutex"));
+
+ mAcquireMutexKey = acquireMutexKey;
+ mDxgiKeyedMutex = std::move(dxgiKeyedMutex);
+
+ AllocationInfo info;
+ info.mMethod = AllocationMethod::kDirect;
+ mResourceAllocation = {info, 0, std::move(d3d12Resource)};
+
+ SetIsSubresourceContentInitialized(true, 0, descriptor->mipLevelCount, 0,
+ descriptor->arrayLayerCount);
+
+ return {};
}
MaybeError Texture::InitializeAsInternalTexture() {
@@ -300,10 +343,10 @@ namespace dawn_native { namespace d3d12 {
resourceDescriptor.Flags =
D3D12ResourceFlags(GetUsage(), GetFormat(), IsMultisampledTexture());
- mResource = ToBackend(GetDevice())
- ->GetResourceAllocator()
- ->Allocate(D3D12_HEAP_TYPE_DEFAULT, resourceDescriptor,
- D3D12_RESOURCE_STATE_COMMON);
+ DAWN_TRY_ASSIGN(mResourceAllocation,
+ ToBackend(GetDevice())
+ ->AllocateMemory(D3D12_HEAP_TYPE_DEFAULT, resourceDescriptor,
+ D3D12_RESOURCE_STATE_COMMON));
Device* device = ToBackend(GetDevice());
@@ -318,12 +361,14 @@ namespace dawn_native { namespace d3d12 {
return {};
}
- // With this constructor, the lifetime of the ID3D12Resource is externally managed.
Texture::Texture(Device* device,
const TextureDescriptor* descriptor,
ComPtr<ID3D12Resource> nativeTexture)
- : TextureBase(device, descriptor, TextureState::OwnedExternal),
- mResource(std::move(nativeTexture)) {
+ : TextureBase(device, descriptor, TextureState::OwnedExternal) {
+ AllocationInfo info;
+ info.mMethod = AllocationMethod::kDirect;
+ mResourceAllocation = {info, 0, std::move(nativeTexture)};
+
SetIsSubresourceContentInitialized(true, 0, descriptor->mipLevelCount, 0,
descriptor->arrayLayerCount);
}
@@ -333,9 +378,13 @@ namespace dawn_native { namespace d3d12 {
}
void Texture::DestroyImpl() {
- // If we own the resource, release it.
- ToBackend(GetDevice())->GetResourceAllocator()->Release(mResource);
- mResource = nullptr;
+ Device* device = ToBackend(GetDevice());
+ device->DeallocateMemory(mResourceAllocation);
+
+ if (mDxgiKeyedMutex != nullptr) {
+ mDxgiKeyedMutex->ReleaseSync(mAcquireMutexKey + 1);
+ device->ReleaseKeyedMutexForTexture(std::move(mDxgiKeyedMutex));
+ }
}
DXGI_FORMAT Texture::GetD3D12Format() const {
@@ -343,12 +392,12 @@ namespace dawn_native { namespace d3d12 {
}
ID3D12Resource* Texture::GetD3D12Resource() const {
- return mResource.Get();
+ return mResourceAllocation.GetD3D12Resource().Get();
}
UINT16 Texture::GetDepthOrArraySize() {
switch (GetDimension()) {
- case dawn::TextureDimension::e2D:
+ case wgpu::TextureDimension::e2D:
return static_cast<UINT16>(GetArrayLayers());
default:
UNREACHABLE();
@@ -360,7 +409,7 @@ namespace dawn_native { namespace d3d12 {
// cause subsequent errors.
bool Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
D3D12_RESOURCE_BARRIER* barrier,
- dawn::TextureUsage newUsage) {
+ wgpu::TextureUsage newUsage) {
return TransitionUsageAndGetResourceBarrier(commandContext, barrier,
D3D12TextureUsage(newUsage, GetFormat()));
}
@@ -371,6 +420,13 @@ namespace dawn_native { namespace d3d12 {
bool Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
D3D12_RESOURCE_BARRIER* barrier,
D3D12_RESOURCE_STATES newState) {
+ // Textures with keyed mutexes can be written from other graphics queues. Hence, they
+ // must be acquired before command list submission to ensure work from the other queues
+ // has finished. See Device::ExecuteCommandContext.
+ if (mDxgiKeyedMutex != nullptr) {
+ commandContext->AddToSharedTextureList(this);
+ }
+
// Avoid transitioning the texture when it isn't needed.
// TODO(cwallez@chromium.org): Need some form of UAV barriers at some point.
if (mLastState == newState) {
@@ -426,7 +482,7 @@ namespace dawn_native { namespace d3d12 {
barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
- barrier->Transition.pResource = mResource.Get();
+ barrier->Transition.pResource = GetD3D12Resource();
barrier->Transition.StateBefore = lastState;
barrier->Transition.StateAfter = newState;
barrier->Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
@@ -437,7 +493,7 @@ namespace dawn_native { namespace d3d12 {
}
void Texture::TransitionUsageNow(CommandRecordingContext* commandContext,
- dawn::TextureUsage usage) {
+ wgpu::TextureUsage usage) {
TransitionUsageNow(commandContext, D3D12TextureUsage(usage, GetFormat()));
}
@@ -453,7 +509,7 @@ namespace dawn_native { namespace d3d12 {
D3D12_RENDER_TARGET_VIEW_DESC Texture::GetRTVDescriptor(uint32_t baseMipLevel,
uint32_t baseArrayLayer,
uint32_t layerCount) const {
- ASSERT(GetDimension() == dawn::TextureDimension::e2D);
+ ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
D3D12_RENDER_TARGET_VIEW_DESC rtvDesc;
rtvDesc.Format = GetD3D12Format();
if (IsMultisampledTexture()) {
@@ -520,7 +576,7 @@ namespace dawn_native { namespace d3d12 {
D3D12_DESCRIPTOR_HEAP_TYPE_DSV, 1));
D3D12_CPU_DESCRIPTOR_HANDLE dsvHandle = dsvHeap.GetCPUHandle(0);
D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc = GetDSVDescriptor(baseMipLevel);
- device->GetD3D12Device()->CreateDepthStencilView(mResource.Get(), &dsvDesc,
+ device->GetD3D12Device()->CreateDepthStencilView(GetD3D12Resource(), &dsvDesc,
dsvHandle);
D3D12_CLEAR_FLAGS clearFlags = {};
@@ -547,7 +603,7 @@ namespace dawn_native { namespace d3d12 {
for (uint32_t i = baseMipLevel; i < baseMipLevel + levelCount; i++) {
D3D12_RENDER_TARGET_VIEW_DESC rtvDesc =
GetRTVDescriptor(i, baseArrayLayer, layerCount);
- device->GetD3D12Device()->CreateRenderTargetView(mResource.Get(), &rtvDesc,
+ device->GetD3D12Device()->CreateRenderTargetView(GetD3D12Resource(), &rtvDesc,
rtvHandle);
commandList->ClearRenderTargetView(rtvHandle, clearColorRGBA, 0, nullptr);
}
@@ -640,9 +696,9 @@ namespace dawn_native { namespace d3d12 {
// TODO(jiawei.shao@intel.com): support more texture view dimensions.
// TODO(jiawei.shao@intel.com): support creating SRV on multisampled textures.
switch (descriptor->dimension) {
- case dawn::TextureViewDimension::e2D:
- case dawn::TextureViewDimension::e2DArray:
- ASSERT(texture->GetDimension() == dawn::TextureDimension::e2D);
+ case wgpu::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2DArray:
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DARRAY;
mSrvDesc.Texture2DArray.ArraySize = descriptor->arrayLayerCount;
mSrvDesc.Texture2DArray.FirstArraySlice = descriptor->baseArrayLayer;
@@ -651,9 +707,9 @@ namespace dawn_native { namespace d3d12 {
mSrvDesc.Texture2DArray.PlaneSlice = 0;
mSrvDesc.Texture2DArray.ResourceMinLODClamp = 0;
break;
- case dawn::TextureViewDimension::Cube:
- case dawn::TextureViewDimension::CubeArray:
- ASSERT(texture->GetDimension() == dawn::TextureDimension::e2D);
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
ASSERT(descriptor->arrayLayerCount % 6 == 0);
mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURECUBEARRAY;
mSrvDesc.TextureCubeArray.First2DArrayFace = descriptor->baseArrayLayer;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
index 162b50ee2f2..332ab5a610b 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
@@ -18,6 +18,7 @@
#include "common/Serial.h"
#include "dawn_native/Texture.h"
+#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
#include "dawn_native/d3d12/d3d12_platform.h"
namespace dawn_native { namespace d3d12 {
@@ -25,7 +26,7 @@ namespace dawn_native { namespace d3d12 {
class CommandRecordingContext;
class Device;
- DXGI_FORMAT D3D12TextureFormat(dawn::TextureFormat format);
+ DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format);
MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
const TextureDescriptor* descriptor);
MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor);
@@ -34,17 +35,22 @@ namespace dawn_native { namespace d3d12 {
public:
static ResultOrError<TextureBase*> Create(Device* device,
const TextureDescriptor* descriptor);
+ static ResultOrError<TextureBase*> Create(Device* device,
+ const TextureDescriptor* descriptor,
+ HANDLE sharedHandle,
+ uint64_t acquireMutexKey);
Texture(Device* device,
const TextureDescriptor* descriptor,
- ComPtr<ID3D12Resource> nativeTexture);
+ ComPtr<ID3D12Resource> d3d12Texture);
+
~Texture();
DXGI_FORMAT GetD3D12Format() const;
ID3D12Resource* GetD3D12Resource() const;
bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
D3D12_RESOURCE_BARRIER* barrier,
- dawn::TextureUsage newUsage);
- void TransitionUsageNow(CommandRecordingContext* commandContext, dawn::TextureUsage usage);
+ wgpu::TextureUsage newUsage);
+ void TransitionUsageNow(CommandRecordingContext* commandContext, wgpu::TextureUsage usage);
void TransitionUsageNow(CommandRecordingContext* commandContext,
D3D12_RESOURCE_STATES newState);
@@ -59,8 +65,12 @@ namespace dawn_native { namespace d3d12 {
uint32_t layerCount);
private:
- Texture(Device* device, const TextureDescriptor* descriptor);
+ using TextureBase::TextureBase;
+
MaybeError InitializeAsInternalTexture();
+ MaybeError InitializeAsExternalTexture(const TextureDescriptor* descriptor,
+ HANDLE sharedHandle,
+ uint64_t acquireMutexKey);
// Dawn API
void DestroyImpl() override;
@@ -77,11 +87,14 @@ namespace dawn_native { namespace d3d12 {
D3D12_RESOURCE_BARRIER* barrier,
D3D12_RESOURCE_STATES newState);
- ComPtr<ID3D12Resource> mResource;
+ ResourceHeapAllocation mResourceAllocation;
D3D12_RESOURCE_STATES mLastState = D3D12_RESOURCE_STATES::D3D12_RESOURCE_STATE_COMMON;
Serial mLastUsedSerial = UINT64_MAX;
bool mValidToDecay = false;
+
+ Serial mAcquireMutexKey = 0;
+ ComPtr<IDXGIKeyedMutex> mDxgiKeyedMutex;
};
class TextureView : public TextureViewBase {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
index 5db58905547..a8acec9708a 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
@@ -18,23 +18,23 @@
namespace dawn_native { namespace d3d12 {
- D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(dawn::CompareFunction func) {
+ D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func) {
switch (func) {
- case dawn::CompareFunction::Always:
+ case wgpu::CompareFunction::Always:
return D3D12_COMPARISON_FUNC_ALWAYS;
- case dawn::CompareFunction::Equal:
+ case wgpu::CompareFunction::Equal:
return D3D12_COMPARISON_FUNC_EQUAL;
- case dawn::CompareFunction::Greater:
+ case wgpu::CompareFunction::Greater:
return D3D12_COMPARISON_FUNC_GREATER;
- case dawn::CompareFunction::GreaterEqual:
+ case wgpu::CompareFunction::GreaterEqual:
return D3D12_COMPARISON_FUNC_GREATER_EQUAL;
- case dawn::CompareFunction::Less:
+ case wgpu::CompareFunction::Less:
return D3D12_COMPARISON_FUNC_LESS;
- case dawn::CompareFunction::LessEqual:
+ case wgpu::CompareFunction::LessEqual:
return D3D12_COMPARISON_FUNC_LESS_EQUAL;
- case dawn::CompareFunction::Never:
+ case wgpu::CompareFunction::Never:
return D3D12_COMPARISON_FUNC_NEVER;
- case dawn::CompareFunction::NotEqual:
+ case wgpu::CompareFunction::NotEqual:
return D3D12_COMPARISON_FUNC_NOT_EQUAL;
default:
UNREACHABLE();
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
index 2566a42d9b1..36a5abe4ab3 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
@@ -23,7 +23,7 @@
namespace dawn_native { namespace d3d12 {
- D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(dawn::CompareFunction func);
+ D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func);
D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
uint32_t level,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/d3d12_platform.h b/chromium/third_party/dawn/src/dawn_native/d3d12/d3d12_platform.h
index 6dfa2fdd867..a64486c64bb 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/d3d12_platform.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/d3d12_platform.h
@@ -15,6 +15,8 @@
#ifndef DAWNNATIVE_D3D12_D3D12PLATFORM_H_
#define DAWNNATIVE_D3D12_D3D12PLATFORM_H_
+#include <d3d11_2.h>
+#include <d3d11on12.h>
#include <d3d12.h>
#include <dxgi1_4.h>
#include <wrl.h>
diff --git a/chromium/third_party/dawn/src/dawn_native/dawn_platform.h b/chromium/third_party/dawn/src/dawn_native/dawn_platform.h
index 795c371225c..52ca9164595 100644
--- a/chromium/third_party/dawn/src/dawn_native/dawn_platform.h
+++ b/chromium/third_party/dawn/src/dawn_native/dawn_platform.h
@@ -15,11 +15,11 @@
#ifndef DAWNNATIVE_DAWNPLATFORM_H_
#define DAWNNATIVE_DAWNPLATFORM_H_
-// Use dawncpp to have the enum and bitfield definitions
-#include <dawn/dawncpp.h>
+// Use webgpu_cpp to have the enum and bitfield definitions
+#include <dawn/webgpu_cpp.h>
-// Use our autogenerated version of the dawn structures that point to dawn_native object types
-// (dawn::Buffer is dawn_native::BufferBase*)
-#include <dawn_native/dawn_structs_autogen.h>
+// Use our autogenerated version of the wgpu structures that point to dawn_native object types
+// (wgpu::Buffer is dawn_native::BufferBase*)
+#include <dawn_native/wgpu_structs_autogen.h>
#endif // DAWNNATIVE_DAWNPLATFORM_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm
index 076e2b8d69e..c61b029cdb4 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm
@@ -25,7 +25,7 @@ namespace dawn_native { namespace metal {
Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
: BufferBase(device, descriptor) {
MTLResourceOptions storageMode;
- if (GetUsage() & (dawn::BufferUsage::MapRead | dawn::BufferUsage::MapWrite)) {
+ if (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) {
storageMode = MTLResourceStorageModeShared;
} else {
storageMode = MTLResourceStorageModePrivate;
@@ -35,7 +35,7 @@ namespace dawn_native { namespace metal {
// Metal validation layer requires the size of uniform buffer and storage buffer to be no
// less than the size of the buffer block defined in shader, and the overall size of the
// buffer must be aligned to the largest alignment of its members.
- if (GetUsage() & (dawn::BufferUsage::Uniform | dawn::BufferUsage::Storage)) {
+ if (GetUsage() & (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage)) {
currentSize = Align(currentSize, kMinUniformOrStorageBufferAlignment);
}
@@ -53,15 +53,15 @@ namespace dawn_native { namespace metal {
void Buffer::OnMapCommandSerialFinished(uint32_t mapSerial, bool isWrite) {
char* data = reinterpret_cast<char*>([mMtlBuffer contents]);
if (isWrite) {
- CallMapWriteCallback(mapSerial, DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS, data, GetSize());
+ CallMapWriteCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
} else {
- CallMapReadCallback(mapSerial, DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS, data, GetSize());
+ CallMapReadCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
}
}
bool Buffer::IsMapWritable() const {
// TODO(enga): Handle CPU-visible memory on UMA
- return (GetUsage() & (dawn::BufferUsage::MapRead | dawn::BufferUsage::MapWrite)) != 0;
+ return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
}
MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
index 48d46714d69..640d19666b6 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
@@ -21,7 +21,7 @@
#import <Metal/Metal.h>
namespace dawn_native {
- class CommandEncoderBase;
+ class CommandEncoder;
}
namespace dawn_native { namespace metal {
@@ -31,7 +31,7 @@ namespace dawn_native { namespace metal {
class CommandBuffer : public CommandBufferBase {
public:
- CommandBuffer(CommandEncoderBase* encoder, const CommandBufferDescriptor* descriptor);
+ CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
~CommandBuffer();
void FillCommands(id<MTLCommandBuffer> commandBuffer);
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
index db0794f4507..38294dd63c7 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
@@ -56,7 +56,7 @@ namespace dawn_native { namespace metal {
IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
auto& attachmentInfo = renderPass->colorAttachments[i];
- if (attachmentInfo.loadOp == dawn::LoadOp::Clear) {
+ if (attachmentInfo.loadOp == wgpu::LoadOp::Clear) {
descriptor.colorAttachments[i].loadAction = MTLLoadActionClear;
descriptor.colorAttachments[i].clearColor =
MTLClearColorMake(attachmentInfo.clearColor.r, attachmentInfo.clearColor.g,
@@ -70,7 +70,7 @@ namespace dawn_native { namespace metal {
descriptor.colorAttachments[i].level = attachmentInfo.view->GetBaseMipLevel();
descriptor.colorAttachments[i].slice = attachmentInfo.view->GetBaseArrayLayer();
- if (attachmentInfo.storeOp == dawn::StoreOp::Store) {
+ if (attachmentInfo.storeOp == wgpu::StoreOp::Store) {
if (attachmentInfo.resolveTarget.Get() != nullptr) {
descriptor.colorAttachments[i].resolveTexture =
ToBackend(attachmentInfo.resolveTarget->GetTexture())->GetMTLTexture();
@@ -98,7 +98,7 @@ namespace dawn_native { namespace metal {
descriptor.depthAttachment.texture = texture;
descriptor.depthAttachment.storeAction = MTLStoreActionStore;
- if (attachmentInfo.depthLoadOp == dawn::LoadOp::Clear) {
+ if (attachmentInfo.depthLoadOp == wgpu::LoadOp::Clear) {
descriptor.depthAttachment.loadAction = MTLLoadActionClear;
descriptor.depthAttachment.clearDepth = attachmentInfo.clearDepth;
} else {
@@ -110,7 +110,7 @@ namespace dawn_native { namespace metal {
descriptor.stencilAttachment.texture = texture;
descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
- if (attachmentInfo.stencilLoadOp == dawn::LoadOp::Clear) {
+ if (attachmentInfo.stencilLoadOp == wgpu::LoadOp::Clear) {
descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
descriptor.stencilAttachment.clearStencil = attachmentInfo.clearStencil;
} else {
@@ -201,21 +201,21 @@ namespace dawn_native { namespace metal {
// length of storage buffers and can apply them to the reserved "buffer length buffer" when
// needed for a draw or a dispatch.
struct StorageBufferLengthTracker {
- dawn::ShaderStage dirtyStages = dawn::ShaderStage::None;
+ wgpu::ShaderStage dirtyStages = wgpu::ShaderStage::None;
// The lengths of buffers are stored as 32bit integers because that is the width the
// MSL code generated by SPIRV-Cross expects.
PerStage<std::array<uint32_t, kGenericMetalBufferSlots>> data;
void Apply(id<MTLRenderCommandEncoder> render, RenderPipeline* pipeline) {
- dawn::ShaderStage stagesToApply =
+ wgpu::ShaderStage stagesToApply =
dirtyStages & pipeline->GetStagesRequiringStorageBufferLength();
- if (stagesToApply == dawn::ShaderStage::None) {
+ if (stagesToApply == wgpu::ShaderStage::None) {
return;
}
- if (stagesToApply & dawn::ShaderStage::Vertex) {
+ if (stagesToApply & wgpu::ShaderStage::Vertex) {
uint32_t bufferCount = ToBackend(pipeline->GetLayout())
->GetBufferBindingCount(SingleShaderStage::Vertex);
[render setVertexBytes:data[SingleShaderStage::Vertex].data()
@@ -223,7 +223,7 @@ namespace dawn_native { namespace metal {
atIndex:kBufferLengthBufferSlot];
}
- if (stagesToApply & dawn::ShaderStage::Fragment) {
+ if (stagesToApply & wgpu::ShaderStage::Fragment) {
uint32_t bufferCount = ToBackend(pipeline->GetLayout())
->GetBufferBindingCount(SingleShaderStage::Fragment);
[render setFragmentBytes:data[SingleShaderStage::Fragment].data()
@@ -236,7 +236,7 @@ namespace dawn_native { namespace metal {
}
void Apply(id<MTLComputeCommandEncoder> compute, ComputePipeline* pipeline) {
- if (!(dirtyStages & dawn::ShaderStage::Compute)) {
+ if (!(dirtyStages & wgpu::ShaderStage::Compute)) {
return;
}
@@ -250,7 +250,7 @@ namespace dawn_native { namespace metal {
length:sizeof(uint32_t) * bufferCount
atIndex:kBufferLengthBufferSlot];
- dirtyStages ^= dawn::ShaderStage::Compute;
+ dirtyStages ^= wgpu::ShaderStage::Compute;
}
};
@@ -394,7 +394,7 @@ namespace dawn_native { namespace metal {
// pipeline state.
// Bind groups may be inherited because bind groups are packed in the buffer /
// texture tables in contiguous order.
- class BindGroupTracker : public BindGroupTrackerBase<BindGroup*, true> {
+ class BindGroupTracker : public BindGroupTrackerBase<true, uint64_t> {
public:
explicit BindGroupTracker(StorageBufferLengthTracker* lengthTracker)
: BindGroupTrackerBase(), mLengthTracker(lengthTracker) {
@@ -403,8 +403,9 @@ namespace dawn_native { namespace metal {
template <typename Encoder>
void Apply(Encoder encoder) {
for (uint32_t index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
- ApplyBindGroup(encoder, index, mBindGroups[index], mDynamicOffsetCounts[index],
- mDynamicOffsets[index].data(), ToBackend(mPipelineLayout));
+ ApplyBindGroup(encoder, index, ToBackend(mBindGroups[index]),
+ mDynamicOffsetCounts[index], mDynamicOffsets[index].data(),
+ ToBackend(mPipelineLayout));
}
DidApply();
}
@@ -429,9 +430,9 @@ namespace dawn_native { namespace metal {
// call here.
for (uint32_t bindingIndex : IterateBitSet(layout.mask)) {
auto stage = layout.visibilities[bindingIndex];
- bool hasVertStage = stage & dawn::ShaderStage::Vertex && render != nil;
- bool hasFragStage = stage & dawn::ShaderStage::Fragment && render != nil;
- bool hasComputeStage = stage & dawn::ShaderStage::Compute && compute != nil;
+ bool hasVertStage = stage & wgpu::ShaderStage::Vertex && render != nil;
+ bool hasFragStage = stage & wgpu::ShaderStage::Fragment && render != nil;
+ bool hasComputeStage = stage & wgpu::ShaderStage::Compute && compute != nil;
uint32_t vertIndex = 0;
uint32_t fragIndex = 0;
@@ -451,8 +452,8 @@ namespace dawn_native { namespace metal {
}
switch (layout.types[bindingIndex]) {
- case dawn::BindingType::UniformBuffer:
- case dawn::BindingType::StorageBuffer: {
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::StorageBuffer: {
const BufferBinding& binding =
group->GetBindingAsBufferBinding(bindingIndex);
const id<MTLBuffer> buffer = ToBackend(binding.buffer)->GetMTLBuffer();
@@ -468,7 +469,7 @@ namespace dawn_native { namespace metal {
if (hasVertStage) {
mLengthTracker->data[SingleShaderStage::Vertex][vertIndex] =
binding.size;
- mLengthTracker->dirtyStages |= dawn::ShaderStage::Vertex;
+ mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
[render setVertexBuffers:&buffer
offsets:&offset
withRange:NSMakeRange(vertIndex, 1)];
@@ -476,7 +477,7 @@ namespace dawn_native { namespace metal {
if (hasFragStage) {
mLengthTracker->data[SingleShaderStage::Fragment][fragIndex] =
binding.size;
- mLengthTracker->dirtyStages |= dawn::ShaderStage::Fragment;
+ mLengthTracker->dirtyStages |= wgpu::ShaderStage::Fragment;
[render setFragmentBuffers:&buffer
offsets:&offset
withRange:NSMakeRange(fragIndex, 1)];
@@ -484,7 +485,7 @@ namespace dawn_native { namespace metal {
if (hasComputeStage) {
mLengthTracker->data[SingleShaderStage::Compute][computeIndex] =
binding.size;
- mLengthTracker->dirtyStages |= dawn::ShaderStage::Compute;
+ mLengthTracker->dirtyStages |= wgpu::ShaderStage::Compute;
[compute setBuffers:&buffer
offsets:&offset
withRange:NSMakeRange(computeIndex, 1)];
@@ -492,7 +493,7 @@ namespace dawn_native { namespace metal {
} break;
- case dawn::BindingType::Sampler: {
+ case wgpu::BindingType::Sampler: {
auto sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
if (hasVertStage) {
[render setVertexSamplerState:sampler->GetMTLSamplerState()
@@ -508,7 +509,7 @@ namespace dawn_native { namespace metal {
}
} break;
- case dawn::BindingType::SampledTexture: {
+ case wgpu::BindingType::SampledTexture: {
auto textureView =
ToBackend(group->GetBindingAsTextureView(bindingIndex));
if (hasVertStage) {
@@ -525,8 +526,8 @@ namespace dawn_native { namespace metal {
}
} break;
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
UNREACHABLE();
break;
}
@@ -548,7 +549,7 @@ namespace dawn_native { namespace metal {
// Keeps track of the dirty vertex buffer values so they can be lazily applied when we know
// all the relevant state.
- class VertexInputBufferTracker {
+ class VertexBufferTracker {
public:
void OnSetVertexBuffer(uint32_t slot, Buffer* buffer, uint64_t offset) {
mVertexBuffers[slot] = buffer->GetMTLBuffer();
@@ -563,12 +564,12 @@ namespace dawn_native { namespace metal {
// When a new pipeline is bound we must set all the vertex buffers again because
// they might have been offset by the pipeline layout, and they might be packed
// differently from the previous pipeline.
- mDirtyVertexBuffers |= pipeline->GetInputsSetMask();
+ mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
}
void Apply(id<MTLRenderCommandEncoder> encoder, RenderPipeline* pipeline) {
std::bitset<kMaxVertexBuffers> vertexBuffersToApply =
- mDirtyVertexBuffers & pipeline->GetInputsSetMask();
+ mDirtyVertexBuffers & pipeline->GetVertexBufferSlotsUsed();
for (uint32_t dawnIndex : IterateBitSet(vertexBuffersToApply)) {
uint32_t metalIndex = pipeline->GetMtlVertexBufferIndex(dawnIndex);
@@ -590,8 +591,7 @@ namespace dawn_native { namespace metal {
} // anonymous namespace
- CommandBuffer::CommandBuffer(CommandEncoderBase* encoder,
- const CommandBufferDescriptor* descriptor)
+ CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
: CommandBufferBase(encoder, descriptor), mCommands(encoder->AcquireCommands()) {
}
@@ -763,9 +763,9 @@ namespace dawn_native { namespace metal {
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
- uint64_t* dynamicOffsets = nullptr;
+ uint32_t* dynamicOffsets = nullptr;
if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = mCommands.NextData<uint64_t>(cmd->dynamicOffsetCount);
+ dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
}
bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
@@ -903,7 +903,7 @@ namespace dawn_native { namespace metal {
RenderPipeline* lastPipeline = nullptr;
id<MTLBuffer> indexBuffer = nil;
uint32_t indexBufferBaseOffset = 0;
- VertexInputBufferTracker vertexInputBuffers;
+ VertexBufferTracker vertexBuffers;
StorageBufferLengthTracker storageBufferLengths = {};
BindGroupTracker bindGroups(&storageBufferLengths);
@@ -916,7 +916,7 @@ namespace dawn_native { namespace metal {
case Command::Draw: {
DrawCmd* draw = iter->NextCommand<DrawCmd>();
- vertexInputBuffers.Apply(encoder, lastPipeline);
+ vertexBuffers.Apply(encoder, lastPipeline);
bindGroups.Apply(encoder);
storageBufferLengths.Apply(encoder, lastPipeline);
@@ -933,9 +933,9 @@ namespace dawn_native { namespace metal {
case Command::DrawIndexed: {
DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
size_t formatSize =
- IndexFormatSize(lastPipeline->GetVertexInputDescriptor()->indexFormat);
+ IndexFormatSize(lastPipeline->GetVertexStateDescriptor()->indexFormat);
- vertexInputBuffers.Apply(encoder, lastPipeline);
+ vertexBuffers.Apply(encoder, lastPipeline);
bindGroups.Apply(encoder);
storageBufferLengths.Apply(encoder, lastPipeline);
@@ -956,7 +956,7 @@ namespace dawn_native { namespace metal {
case Command::DrawIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- vertexInputBuffers.Apply(encoder, lastPipeline);
+ vertexBuffers.Apply(encoder, lastPipeline);
bindGroups.Apply(encoder);
storageBufferLengths.Apply(encoder, lastPipeline);
@@ -970,7 +970,7 @@ namespace dawn_native { namespace metal {
case Command::DrawIndexedIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- vertexInputBuffers.Apply(encoder, lastPipeline);
+ vertexBuffers.Apply(encoder, lastPipeline);
bindGroups.Apply(encoder);
storageBufferLengths.Apply(encoder, lastPipeline);
@@ -1012,7 +1012,7 @@ namespace dawn_native { namespace metal {
SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
RenderPipeline* newPipeline = ToBackend(cmd->pipeline).Get();
- vertexInputBuffers.OnSetPipeline(lastPipeline, newPipeline);
+ vertexBuffers.OnSetPipeline(lastPipeline, newPipeline);
bindGroups.OnSetPipeline(newPipeline);
[encoder setDepthStencilState:newPipeline->GetMTLDepthStencilState()];
@@ -1025,9 +1025,9 @@ namespace dawn_native { namespace metal {
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
- uint64_t* dynamicOffsets = nullptr;
+ uint32_t* dynamicOffsets = nullptr;
if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = iter->NextData<uint64_t>(cmd->dynamicOffsetCount);
+ dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
@@ -1044,8 +1044,8 @@ namespace dawn_native { namespace metal {
case Command::SetVertexBuffer: {
SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
- vertexInputBuffers.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
- cmd->offset);
+ vertexBuffers.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
+ cmd->offset);
} break;
default:
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm
index f62412a3104..fd723643e91 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ComputePipelineMTL.mm
@@ -33,7 +33,7 @@ namespace dawn_native { namespace metal {
[mtlDevice newComputePipelineStateWithFunction:computeData.function error:&error];
if (error != nil) {
NSLog(@" error => %@", error);
- GetDevice()->HandleError(dawn::ErrorType::DeviceLost, "Error creating pipeline state");
+ GetDevice()->HandleError(wgpu::ErrorType::DeviceLost, "Error creating pipeline state");
return;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
index 5d8c671e172..4424dc89104 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
@@ -38,7 +38,7 @@ namespace dawn_native { namespace metal {
Device(AdapterBase* adapter, id<MTLDevice> mtlDevice, const DeviceDescriptor* descriptor);
~Device();
- CommandBufferBase* CreateCommandBuffer(CommandEncoderBase* encoder,
+ CommandBufferBase* CreateCommandBuffer(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) override;
Serial GetCompletedCommandSerial() const final override;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
index bacc529ab05..504e6b3ade8 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
@@ -29,6 +29,7 @@
#include "dawn_native/metal/StagingBufferMTL.h"
#include "dawn_native/metal/SwapChainMTL.h"
#include "dawn_native/metal/TextureMTL.h"
+#include "dawn_platform/DawnPlatform.h"
#include "dawn_platform/tracing/TraceEvent.h"
#include <type_traits>
@@ -76,15 +77,18 @@ namespace dawn_native { namespace metal {
}
void Device::InitTogglesFromDriver() {
+ {
+ bool haveStoreAndMSAAResolve = false;
#if defined(DAWN_PLATFORM_MACOS)
- if (@available(macOS 10.12, *)) {
- bool emulateStoreAndMSAAResolve =
- ![mMtlDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v2];
- SetToggle(Toggle::EmulateStoreAndMSAAResolve, emulateStoreAndMSAAResolve);
- }
+ haveStoreAndMSAAResolve =
+ [mMtlDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v2];
+#elif defined(DAWN_PLATFORM_IOS)
+ haveStoreAndMSAAResolve =
+ [mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v2];
#endif
-
- // TODO(jiawei.shao@intel.com): check iOS feature sets
+ // On tvOS, we would need MTLFeatureSet_tvOS_GPUFamily2_v1.
+ SetToggle(Toggle::EmulateStoreAndMSAAResolve, !haveStoreAndMSAAResolve);
+ }
// TODO(jiawei.shao@intel.com): tighten this workaround when the driver bug is fixed.
SetToggle(Toggle::AlwaysResolveIntoZeroLevelAndLayer, true);
@@ -101,7 +105,7 @@ namespace dawn_native { namespace metal {
ResultOrError<BufferBase*> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
return new Buffer(this, descriptor);
}
- CommandBufferBase* Device::CreateCommandBuffer(CommandEncoderBase* encoder,
+ CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) {
return new CommandBuffer(encoder, descriptor);
}
@@ -125,7 +129,7 @@ namespace dawn_native { namespace metal {
}
ResultOrError<ShaderModuleBase*> Device::CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor) {
- return new ShaderModule(this, descriptor);
+ return ShaderModule::Create(this, descriptor);
}
ResultOrError<SwapChainBase*> Device::CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) {
@@ -180,8 +184,7 @@ namespace dawn_native { namespace metal {
}
id<MTLCommandBuffer> Device::GetPendingCommandBuffer() {
- TRACE_EVENT0(GetPlatform(), TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
- "DeviceMTL::GetPendingCommandBuffer");
+ TRACE_EVENT0(GetPlatform(), General, "DeviceMTL::GetPendingCommandBuffer");
if (mPendingCommands == nil) {
mPendingCommands = [mCommandQueue commandBuffer];
[mPendingCommands retain];
@@ -228,14 +231,14 @@ namespace dawn_native { namespace metal {
// mLastSubmittedSerial so it is captured by value.
Serial pendingSerial = mLastSubmittedSerial;
[mPendingCommands addCompletedHandler:^(id<MTLCommandBuffer>) {
- TRACE_EVENT_ASYNC_END0(GetPlatform(), TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
- "DeviceMTL::SubmitPendingCommandBuffer", pendingSerial);
+ TRACE_EVENT_ASYNC_END0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
+ pendingSerial);
ASSERT(pendingSerial > mCompletedSerial.load());
this->mCompletedSerial = pendingSerial;
}];
- TRACE_EVENT_ASYNC_BEGIN0(GetPlatform(), TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
- "DeviceMTL::SubmitPendingCommandBuffer", pendingSerial);
+ TRACE_EVENT_ASYNC_BEGIN0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
+ pendingSerial);
[mPendingCommands commit];
mPendingCommands = nil;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/MetalBackend.mm b/chromium/third_party/dawn/src/dawn_native/metal/MetalBackend.mm
index e5c88673fff..22b583af18c 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/MetalBackend.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/MetalBackend.mm
@@ -22,23 +22,23 @@
namespace dawn_native { namespace metal {
- id<MTLDevice> GetMetalDevice(DawnDevice cDevice) {
+ id<MTLDevice> GetMetalDevice(WGPUDevice cDevice) {
Device* device = reinterpret_cast<Device*>(cDevice);
return device->GetMTLDevice();
}
- DawnTexture WrapIOSurface(DawnDevice cDevice,
- const DawnTextureDescriptor* cDescriptor,
+ WGPUTexture WrapIOSurface(WGPUDevice cDevice,
+ const WGPUTextureDescriptor* cDescriptor,
IOSurfaceRef ioSurface,
uint32_t plane) {
Device* device = reinterpret_cast<Device*>(cDevice);
const TextureDescriptor* descriptor =
reinterpret_cast<const TextureDescriptor*>(cDescriptor);
TextureBase* texture = device->CreateTextureWrappingIOSurface(descriptor, ioSurface, plane);
- return reinterpret_cast<DawnTexture>(texture);
+ return reinterpret_cast<WGPUTexture>(texture);
}
- void WaitForCommandsToBeScheduled(DawnDevice cDevice) {
+ void WaitForCommandsToBeScheduled(WGPUDevice cDevice) {
Device* device = reinterpret_cast<Device*>(cDevice);
device->WaitForCommandsToBeScheduled();
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
index 491a70abe72..dc528915c35 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
@@ -39,21 +39,21 @@ namespace dawn_native { namespace metal {
}
switch (groupInfo.types[binding]) {
- case dawn::BindingType::UniformBuffer:
- case dawn::BindingType::StorageBuffer:
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::StorageBuffer:
mIndexInfo[stage][group][binding] = bufferIndex;
bufferIndex++;
break;
- case dawn::BindingType::Sampler:
+ case wgpu::BindingType::Sampler:
mIndexInfo[stage][group][binding] = samplerIndex;
samplerIndex++;
break;
- case dawn::BindingType::SampledTexture:
+ case wgpu::BindingType::SampledTexture:
mIndexInfo[stage][group][binding] = textureIndex;
textureIndex++;
break;
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
UNREACHABLE();
break;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
index d815c6e56ce..dd360e970b0 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
@@ -16,6 +16,7 @@
#include "dawn_native/metal/CommandBufferMTL.h"
#include "dawn_native/metal/DeviceMTL.h"
+#include "dawn_platform/DawnPlatform.h"
#include "dawn_platform/tracing/TraceEvent.h"
namespace dawn_native { namespace metal {
@@ -28,13 +29,11 @@ namespace dawn_native { namespace metal {
device->Tick();
id<MTLCommandBuffer> commandBuffer = device->GetPendingCommandBuffer();
- TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
- "CommandBufferMTL::FillCommands");
+ TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
for (uint32_t i = 0; i < commandCount; ++i) {
ToBackend(commands[i])->FillCommands(commandBuffer);
}
- TRACE_EVENT_END0(GetDevice()->GetPlatform(), TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
- "CommandBufferMTL::FillCommands");
+ TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
device->SubmitPendingCommandBuffer();
return {};
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h
index 1b764ddae5a..bce358b92c8 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.h
@@ -41,7 +41,7 @@ namespace dawn_native { namespace metal {
// vertex buffer table.
uint32_t GetMtlVertexBufferIndex(uint32_t dawnIndex) const;
- dawn::ShaderStage GetStagesRequiringStorageBufferLength() const;
+ wgpu::ShaderStage GetStagesRequiringStorageBufferLength() const;
private:
MTLVertexDescriptor* MakeVertexDesc();
@@ -54,7 +54,7 @@ namespace dawn_native { namespace metal {
id<MTLDepthStencilState> mMtlDepthStencilState = nil;
std::array<uint32_t, kMaxVertexBuffers> mMtlVertexBufferIndices;
- dawn::ShaderStage mStagesRequiringStorageBufferLength = dawn::ShaderStage::None;
+ wgpu::ShaderStage mStagesRequiringStorageBufferLength = wgpu::ShaderStage::None;
};
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
index 6683f1ca9a1..3c4d8523f6c 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
@@ -23,166 +23,166 @@
namespace dawn_native { namespace metal {
namespace {
- MTLVertexFormat VertexFormatType(dawn::VertexFormat format) {
+ MTLVertexFormat VertexFormatType(wgpu::VertexFormat format) {
switch (format) {
- case dawn::VertexFormat::UChar2:
+ case wgpu::VertexFormat::UChar2:
return MTLVertexFormatUChar2;
- case dawn::VertexFormat::UChar4:
+ case wgpu::VertexFormat::UChar4:
return MTLVertexFormatUChar4;
- case dawn::VertexFormat::Char2:
+ case wgpu::VertexFormat::Char2:
return MTLVertexFormatChar2;
- case dawn::VertexFormat::Char4:
+ case wgpu::VertexFormat::Char4:
return MTLVertexFormatChar4;
- case dawn::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::UChar2Norm:
return MTLVertexFormatUChar2Normalized;
- case dawn::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::UChar4Norm:
return MTLVertexFormatUChar4Normalized;
- case dawn::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::Char2Norm:
return MTLVertexFormatChar2Normalized;
- case dawn::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::Char4Norm:
return MTLVertexFormatChar4Normalized;
- case dawn::VertexFormat::UShort2:
+ case wgpu::VertexFormat::UShort2:
return MTLVertexFormatUShort2;
- case dawn::VertexFormat::UShort4:
+ case wgpu::VertexFormat::UShort4:
return MTLVertexFormatUShort4;
- case dawn::VertexFormat::Short2:
+ case wgpu::VertexFormat::Short2:
return MTLVertexFormatShort2;
- case dawn::VertexFormat::Short4:
+ case wgpu::VertexFormat::Short4:
return MTLVertexFormatShort4;
- case dawn::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::UShort2Norm:
return MTLVertexFormatUShort2Normalized;
- case dawn::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::UShort4Norm:
return MTLVertexFormatUShort4Normalized;
- case dawn::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Short2Norm:
return MTLVertexFormatShort2Normalized;
- case dawn::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Short4Norm:
return MTLVertexFormatShort4Normalized;
- case dawn::VertexFormat::Half2:
+ case wgpu::VertexFormat::Half2:
return MTLVertexFormatHalf2;
- case dawn::VertexFormat::Half4:
+ case wgpu::VertexFormat::Half4:
return MTLVertexFormatHalf4;
- case dawn::VertexFormat::Float:
+ case wgpu::VertexFormat::Float:
return MTLVertexFormatFloat;
- case dawn::VertexFormat::Float2:
+ case wgpu::VertexFormat::Float2:
return MTLVertexFormatFloat2;
- case dawn::VertexFormat::Float3:
+ case wgpu::VertexFormat::Float3:
return MTLVertexFormatFloat3;
- case dawn::VertexFormat::Float4:
+ case wgpu::VertexFormat::Float4:
return MTLVertexFormatFloat4;
- case dawn::VertexFormat::UInt:
+ case wgpu::VertexFormat::UInt:
return MTLVertexFormatUInt;
- case dawn::VertexFormat::UInt2:
+ case wgpu::VertexFormat::UInt2:
return MTLVertexFormatUInt2;
- case dawn::VertexFormat::UInt3:
+ case wgpu::VertexFormat::UInt3:
return MTLVertexFormatUInt3;
- case dawn::VertexFormat::UInt4:
+ case wgpu::VertexFormat::UInt4:
return MTLVertexFormatUInt4;
- case dawn::VertexFormat::Int:
+ case wgpu::VertexFormat::Int:
return MTLVertexFormatInt;
- case dawn::VertexFormat::Int2:
+ case wgpu::VertexFormat::Int2:
return MTLVertexFormatInt2;
- case dawn::VertexFormat::Int3:
+ case wgpu::VertexFormat::Int3:
return MTLVertexFormatInt3;
- case dawn::VertexFormat::Int4:
+ case wgpu::VertexFormat::Int4:
return MTLVertexFormatInt4;
}
}
- MTLVertexStepFunction InputStepModeFunction(dawn::InputStepMode mode) {
+ MTLVertexStepFunction InputStepModeFunction(wgpu::InputStepMode mode) {
switch (mode) {
- case dawn::InputStepMode::Vertex:
+ case wgpu::InputStepMode::Vertex:
return MTLVertexStepFunctionPerVertex;
- case dawn::InputStepMode::Instance:
+ case wgpu::InputStepMode::Instance:
return MTLVertexStepFunctionPerInstance;
}
}
- MTLPrimitiveType MTLPrimitiveTopology(dawn::PrimitiveTopology primitiveTopology) {
+ MTLPrimitiveType MTLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
switch (primitiveTopology) {
- case dawn::PrimitiveTopology::PointList:
+ case wgpu::PrimitiveTopology::PointList:
return MTLPrimitiveTypePoint;
- case dawn::PrimitiveTopology::LineList:
+ case wgpu::PrimitiveTopology::LineList:
return MTLPrimitiveTypeLine;
- case dawn::PrimitiveTopology::LineStrip:
+ case wgpu::PrimitiveTopology::LineStrip:
return MTLPrimitiveTypeLineStrip;
- case dawn::PrimitiveTopology::TriangleList:
+ case wgpu::PrimitiveTopology::TriangleList:
return MTLPrimitiveTypeTriangle;
- case dawn::PrimitiveTopology::TriangleStrip:
+ case wgpu::PrimitiveTopology::TriangleStrip:
return MTLPrimitiveTypeTriangleStrip;
}
}
MTLPrimitiveTopologyClass MTLInputPrimitiveTopology(
- dawn::PrimitiveTopology primitiveTopology) {
+ wgpu::PrimitiveTopology primitiveTopology) {
switch (primitiveTopology) {
- case dawn::PrimitiveTopology::PointList:
+ case wgpu::PrimitiveTopology::PointList:
return MTLPrimitiveTopologyClassPoint;
- case dawn::PrimitiveTopology::LineList:
- case dawn::PrimitiveTopology::LineStrip:
+ case wgpu::PrimitiveTopology::LineList:
+ case wgpu::PrimitiveTopology::LineStrip:
return MTLPrimitiveTopologyClassLine;
- case dawn::PrimitiveTopology::TriangleList:
- case dawn::PrimitiveTopology::TriangleStrip:
+ case wgpu::PrimitiveTopology::TriangleList:
+ case wgpu::PrimitiveTopology::TriangleStrip:
return MTLPrimitiveTopologyClassTriangle;
}
}
- MTLIndexType MTLIndexFormat(dawn::IndexFormat format) {
+ MTLIndexType MTLIndexFormat(wgpu::IndexFormat format) {
switch (format) {
- case dawn::IndexFormat::Uint16:
+ case wgpu::IndexFormat::Uint16:
return MTLIndexTypeUInt16;
- case dawn::IndexFormat::Uint32:
+ case wgpu::IndexFormat::Uint32:
return MTLIndexTypeUInt32;
}
}
- MTLBlendFactor MetalBlendFactor(dawn::BlendFactor factor, bool alpha) {
+ MTLBlendFactor MetalBlendFactor(wgpu::BlendFactor factor, bool alpha) {
switch (factor) {
- case dawn::BlendFactor::Zero:
+ case wgpu::BlendFactor::Zero:
return MTLBlendFactorZero;
- case dawn::BlendFactor::One:
+ case wgpu::BlendFactor::One:
return MTLBlendFactorOne;
- case dawn::BlendFactor::SrcColor:
+ case wgpu::BlendFactor::SrcColor:
return MTLBlendFactorSourceColor;
- case dawn::BlendFactor::OneMinusSrcColor:
+ case wgpu::BlendFactor::OneMinusSrcColor:
return MTLBlendFactorOneMinusSourceColor;
- case dawn::BlendFactor::SrcAlpha:
+ case wgpu::BlendFactor::SrcAlpha:
return MTLBlendFactorSourceAlpha;
- case dawn::BlendFactor::OneMinusSrcAlpha:
+ case wgpu::BlendFactor::OneMinusSrcAlpha:
return MTLBlendFactorOneMinusSourceAlpha;
- case dawn::BlendFactor::DstColor:
+ case wgpu::BlendFactor::DstColor:
return MTLBlendFactorDestinationColor;
- case dawn::BlendFactor::OneMinusDstColor:
+ case wgpu::BlendFactor::OneMinusDstColor:
return MTLBlendFactorOneMinusDestinationColor;
- case dawn::BlendFactor::DstAlpha:
+ case wgpu::BlendFactor::DstAlpha:
return MTLBlendFactorDestinationAlpha;
- case dawn::BlendFactor::OneMinusDstAlpha:
+ case wgpu::BlendFactor::OneMinusDstAlpha:
return MTLBlendFactorOneMinusDestinationAlpha;
- case dawn::BlendFactor::SrcAlphaSaturated:
+ case wgpu::BlendFactor::SrcAlphaSaturated:
return MTLBlendFactorSourceAlphaSaturated;
- case dawn::BlendFactor::BlendColor:
+ case wgpu::BlendFactor::BlendColor:
return alpha ? MTLBlendFactorBlendAlpha : MTLBlendFactorBlendColor;
- case dawn::BlendFactor::OneMinusBlendColor:
+ case wgpu::BlendFactor::OneMinusBlendColor:
return alpha ? MTLBlendFactorOneMinusBlendAlpha
: MTLBlendFactorOneMinusBlendColor;
}
}
- MTLBlendOperation MetalBlendOperation(dawn::BlendOperation operation) {
+ MTLBlendOperation MetalBlendOperation(wgpu::BlendOperation operation) {
switch (operation) {
- case dawn::BlendOperation::Add:
+ case wgpu::BlendOperation::Add:
return MTLBlendOperationAdd;
- case dawn::BlendOperation::Subtract:
+ case wgpu::BlendOperation::Subtract:
return MTLBlendOperationSubtract;
- case dawn::BlendOperation::ReverseSubtract:
+ case wgpu::BlendOperation::ReverseSubtract:
return MTLBlendOperationReverseSubtract;
- case dawn::BlendOperation::Min:
+ case wgpu::BlendOperation::Min:
return MTLBlendOperationMin;
- case dawn::BlendOperation::Max:
+ case wgpu::BlendOperation::Max:
return MTLBlendOperationMax;
}
}
- MTLColorWriteMask MetalColorWriteMask(dawn::ColorWriteMask writeMask,
+ MTLColorWriteMask MetalColorWriteMask(wgpu::ColorWriteMask writeMask,
bool isDeclaredInFragmentShader) {
if (!isDeclaredInFragmentShader) {
return MTLColorWriteMaskNone;
@@ -190,16 +190,16 @@ namespace dawn_native { namespace metal {
MTLColorWriteMask mask = MTLColorWriteMaskNone;
- if (writeMask & dawn::ColorWriteMask::Red) {
+ if (writeMask & wgpu::ColorWriteMask::Red) {
mask |= MTLColorWriteMaskRed;
}
- if (writeMask & dawn::ColorWriteMask::Green) {
+ if (writeMask & wgpu::ColorWriteMask::Green) {
mask |= MTLColorWriteMaskGreen;
}
- if (writeMask & dawn::ColorWriteMask::Blue) {
+ if (writeMask & wgpu::ColorWriteMask::Blue) {
mask |= MTLColorWriteMaskBlue;
}
- if (writeMask & dawn::ColorWriteMask::Alpha) {
+ if (writeMask & wgpu::ColorWriteMask::Alpha) {
mask |= MTLColorWriteMaskAlpha;
}
@@ -224,23 +224,23 @@ namespace dawn_native { namespace metal {
MetalColorWriteMask(descriptor->writeMask, isDeclaredInFragmentShader);
}
- MTLStencilOperation MetalStencilOperation(dawn::StencilOperation stencilOperation) {
+ MTLStencilOperation MetalStencilOperation(wgpu::StencilOperation stencilOperation) {
switch (stencilOperation) {
- case dawn::StencilOperation::Keep:
+ case wgpu::StencilOperation::Keep:
return MTLStencilOperationKeep;
- case dawn::StencilOperation::Zero:
+ case wgpu::StencilOperation::Zero:
return MTLStencilOperationZero;
- case dawn::StencilOperation::Replace:
+ case wgpu::StencilOperation::Replace:
return MTLStencilOperationReplace;
- case dawn::StencilOperation::Invert:
+ case wgpu::StencilOperation::Invert:
return MTLStencilOperationInvert;
- case dawn::StencilOperation::IncrementClamp:
+ case wgpu::StencilOperation::IncrementClamp:
return MTLStencilOperationIncrementClamp;
- case dawn::StencilOperation::DecrementClamp:
+ case wgpu::StencilOperation::DecrementClamp:
return MTLStencilOperationDecrementClamp;
- case dawn::StencilOperation::IncrementWrap:
+ case wgpu::StencilOperation::IncrementWrap:
return MTLStencilOperationIncrementWrap;
- case dawn::StencilOperation::DecrementWrap:
+ case wgpu::StencilOperation::DecrementWrap:
return MTLStencilOperationDecrementWrap;
}
}
@@ -289,22 +289,22 @@ namespace dawn_native { namespace metal {
return mtlDepthStencilDescriptor;
}
- MTLWinding MTLFrontFace(dawn::FrontFace face) {
+ MTLWinding MTLFrontFace(wgpu::FrontFace face) {
switch (face) {
- case dawn::FrontFace::CW:
+ case wgpu::FrontFace::CW:
return MTLWindingClockwise;
- case dawn::FrontFace::CCW:
+ case wgpu::FrontFace::CCW:
return MTLWindingCounterClockwise;
}
}
- MTLCullMode ToMTLCullMode(dawn::CullMode mode) {
+ MTLCullMode ToMTLCullMode(wgpu::CullMode mode) {
switch (mode) {
- case dawn::CullMode::None:
+ case wgpu::CullMode::None:
return MTLCullModeNone;
- case dawn::CullMode::Front:
+ case wgpu::CullMode::Front:
return MTLCullModeFront;
- case dawn::CullMode::Back:
+ case wgpu::CullMode::Back:
return MTLCullModeBack;
}
}
@@ -313,7 +313,7 @@ namespace dawn_native { namespace metal {
RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor)
: RenderPipelineBase(device, descriptor),
- mMtlIndexType(MTLIndexFormat(GetVertexInputDescriptor()->indexFormat)),
+ mMtlIndexType(MTLIndexFormat(GetVertexStateDescriptor()->indexFormat)),
mMtlPrimitiveTopology(MTLPrimitiveTopology(GetPrimitiveTopology())),
mMtlFrontFace(MTLFrontFace(GetFrontFace())),
mMtlCullMode(ToMTLCullMode(GetCullMode())) {
@@ -327,7 +327,7 @@ namespace dawn_native { namespace metal {
vertexEntryPoint, SingleShaderStage::Vertex, ToBackend(GetLayout()));
descriptorMTL.vertexFunction = vertexData.function;
if (vertexData.needsStorageBufferLength) {
- mStagesRequiringStorageBufferLength |= dawn::ShaderStage::Vertex;
+ mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Vertex;
}
const ShaderModule* fragmentModule = ToBackend(descriptor->fragmentStage->module);
@@ -336,12 +336,12 @@ namespace dawn_native { namespace metal {
fragmentEntryPoint, SingleShaderStage::Fragment, ToBackend(GetLayout()));
descriptorMTL.fragmentFunction = fragmentData.function;
if (fragmentData.needsStorageBufferLength) {
- mStagesRequiringStorageBufferLength |= dawn::ShaderStage::Fragment;
+ mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Fragment;
}
if (HasDepthStencilAttachment()) {
// TODO(kainino@chromium.org): Handle depth-only and stencil-only formats.
- dawn::TextureFormat depthStencilFormat = GetDepthStencilFormat();
+ wgpu::TextureFormat depthStencilFormat = GetDepthStencilFormat();
descriptorMTL.depthAttachmentPixelFormat = MetalPixelFormat(depthStencilFormat);
descriptorMTL.stencilAttachmentPixelFormat = MetalPixelFormat(depthStencilFormat);
}
@@ -372,7 +372,7 @@ namespace dawn_native { namespace metal {
[descriptorMTL release];
if (error != nil) {
NSLog(@" error => %@", error);
- device->HandleError(dawn::ErrorType::DeviceLost,
+ device->HandleError(wgpu::ErrorType::DeviceLost,
"Error creating rendering pipeline state");
return;
}
@@ -421,7 +421,7 @@ namespace dawn_native { namespace metal {
return mMtlVertexBufferIndices[dawnIndex];
}
- dawn::ShaderStage RenderPipeline::GetStagesRequiringStorageBufferLength() const {
+ wgpu::ShaderStage RenderPipeline::GetStagesRequiringStorageBufferLength() const {
return mStagesRequiringStorageBufferLength;
}
@@ -432,49 +432,49 @@ namespace dawn_native { namespace metal {
uint32_t mtlVertexBufferIndex =
ToBackend(GetLayout())->GetBufferBindingCount(SingleShaderStage::Vertex);
- for (uint32_t dawnVertexBufferIndex : IterateBitSet(GetInputsSetMask())) {
- const VertexBufferInfo& info = GetInput(dawnVertexBufferIndex);
+ for (uint32_t dawnVertexBufferSlot : IterateBitSet(GetVertexBufferSlotsUsed())) {
+ const VertexBufferInfo& info = GetVertexBuffer(dawnVertexBufferSlot);
MTLVertexBufferLayoutDescriptor* layoutDesc = [MTLVertexBufferLayoutDescriptor new];
- if (info.stride == 0) {
+ if (info.arrayStride == 0) {
// For MTLVertexStepFunctionConstant, the stepRate must be 0,
- // but the stride must NOT be 0, so we made up it with
+ // but the arrayStride must NOT be 0, so we made up it with
// max(attrib.offset + sizeof(attrib) for each attrib)
- size_t max_stride = 0;
- for (uint32_t attribIndex : IterateBitSet(GetAttributesSetMask())) {
+ size_t maxArrayStride = 0;
+ for (uint32_t attribIndex : IterateBitSet(GetAttributeLocationsUsed())) {
const VertexAttributeInfo& attrib = GetAttribute(attribIndex);
// Only use the attributes that use the current input
- if (attrib.inputSlot != dawnVertexBufferIndex) {
+ if (attrib.vertexBufferSlot != dawnVertexBufferSlot) {
continue;
}
- max_stride = std::max(max_stride,
- VertexFormatSize(attrib.format) + size_t(attrib.offset));
+ maxArrayStride = std::max(
+ maxArrayStride, VertexFormatSize(attrib.format) + size_t(attrib.offset));
}
layoutDesc.stepFunction = MTLVertexStepFunctionConstant;
layoutDesc.stepRate = 0;
// Metal requires the stride must be a multiple of 4 bytes, align it with next
// multiple of 4 if it's not.
- layoutDesc.stride = Align(max_stride, 4);
+ layoutDesc.stride = Align(maxArrayStride, 4);
} else {
layoutDesc.stepFunction = InputStepModeFunction(info.stepMode);
layoutDesc.stepRate = 1;
- layoutDesc.stride = info.stride;
+ layoutDesc.stride = info.arrayStride;
}
mtlVertexDescriptor.layouts[mtlVertexBufferIndex] = layoutDesc;
[layoutDesc release];
- mMtlVertexBufferIndices[dawnVertexBufferIndex] = mtlVertexBufferIndex;
+ mMtlVertexBufferIndices[dawnVertexBufferSlot] = mtlVertexBufferIndex;
mtlVertexBufferIndex++;
}
- for (uint32_t i : IterateBitSet(GetAttributesSetMask())) {
+ for (uint32_t i : IterateBitSet(GetAttributeLocationsUsed())) {
const VertexAttributeInfo& info = GetAttribute(i);
auto attribDesc = [MTLVertexAttributeDescriptor new];
attribDesc.format = VertexFormatType(info.format);
attribDesc.offset = info.offset;
- attribDesc.bufferIndex = mMtlVertexBufferIndices[info.inputSlot];
+ attribDesc.bufferIndex = mMtlVertexBufferIndices[info.vertexBufferSlot];
mtlVertexDescriptor.attributes[i] = attribDesc;
[attribDesc release];
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm
index 720f3717a44..c58e5823af5 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/SamplerMTL.mm
@@ -20,31 +20,31 @@
namespace dawn_native { namespace metal {
namespace {
- MTLSamplerMinMagFilter FilterModeToMinMagFilter(dawn::FilterMode mode) {
+ MTLSamplerMinMagFilter FilterModeToMinMagFilter(wgpu::FilterMode mode) {
switch (mode) {
- case dawn::FilterMode::Nearest:
+ case wgpu::FilterMode::Nearest:
return MTLSamplerMinMagFilterNearest;
- case dawn::FilterMode::Linear:
+ case wgpu::FilterMode::Linear:
return MTLSamplerMinMagFilterLinear;
}
}
- MTLSamplerMipFilter FilterModeToMipFilter(dawn::FilterMode mode) {
+ MTLSamplerMipFilter FilterModeToMipFilter(wgpu::FilterMode mode) {
switch (mode) {
- case dawn::FilterMode::Nearest:
+ case wgpu::FilterMode::Nearest:
return MTLSamplerMipFilterNearest;
- case dawn::FilterMode::Linear:
+ case wgpu::FilterMode::Linear:
return MTLSamplerMipFilterLinear;
}
}
- MTLSamplerAddressMode AddressMode(dawn::AddressMode mode) {
+ MTLSamplerAddressMode AddressMode(wgpu::AddressMode mode) {
switch (mode) {
- case dawn::AddressMode::Repeat:
+ case wgpu::AddressMode::Repeat:
return MTLSamplerAddressModeRepeat;
- case dawn::AddressMode::MirrorRepeat:
+ case wgpu::AddressMode::MirrorRepeat:
return MTLSamplerAddressModeMirrorRepeat;
- case dawn::AddressMode::ClampToEdge:
+ case wgpu::AddressMode::ClampToEdge:
return MTLSamplerAddressModeClampToEdge;
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
index c31fdf1050e..e259b691a20 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
@@ -19,6 +19,8 @@
#import <Metal/Metal.h>
+#include "dawn_native/Error.h"
+
namespace spirv_cross {
class CompilerMSL;
}
@@ -30,7 +32,8 @@ namespace dawn_native { namespace metal {
class ShaderModule : public ShaderModuleBase {
public:
- ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+ static ResultOrError<ShaderModule*> Create(Device* device,
+ const ShaderModuleDescriptor* descriptor);
struct MetalFunctionData {
id<MTLFunction> function;
@@ -45,6 +48,9 @@ namespace dawn_native { namespace metal {
const PipelineLayout* layout) const;
private:
+ ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+ MaybeError Initialize(const ShaderModuleDescriptor* descriptor);
+
// Calling compile on CompilerMSL somehow changes internal state that makes subsequent
// compiles return invalid MSL. We keep the spirv around and recreate the compiler everytime
// we need to use it.
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
index d27c5aab14b..c01621dacf5 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
@@ -38,36 +38,86 @@ namespace dawn_native { namespace metal {
UNREACHABLE();
}
}
+ } // namespace
+
+ // static
+ ResultOrError<ShaderModule*> ShaderModule::Create(Device* device,
+ const ShaderModuleDescriptor* descriptor) {
+ std::unique_ptr<ShaderModule> module(new ShaderModule(device, descriptor));
+ if (!module)
+ return DAWN_VALIDATION_ERROR("Unable to create ShaderModule");
+ DAWN_TRY(module->Initialize(descriptor));
+ return module.release();
}
ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
: ShaderModuleBase(device, descriptor) {
+ }
+
+ MaybeError ShaderModule::Initialize(const ShaderModuleDescriptor* descriptor) {
mSpirv.assign(descriptor->code, descriptor->code + descriptor->codeSize);
- spirv_cross::CompilerMSL compiler(mSpirv);
- ExtractSpirvInfo(compiler);
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ shaderc_spvc::CompileOptions options;
+ shaderc_spvc_status status =
+ mSpvcContext.InitializeForGlsl(descriptor->code, descriptor->codeSize, options);
+ if (status != shaderc_spvc_status_success) {
+ return DAWN_VALIDATION_ERROR("Unable to initialize instance of spvc");
+ }
+
+ spirv_cross::CompilerMSL* compiler =
+ reinterpret_cast<spirv_cross::CompilerMSL*>(mSpvcContext.GetCompiler());
+ ExtractSpirvInfo(*compiler);
+ } else {
+ spirv_cross::CompilerMSL compiler(mSpirv);
+ ExtractSpirvInfo(compiler);
+ }
+ return {};
}
ShaderModule::MetalFunctionData ShaderModule::GetFunction(const char* functionName,
SingleShaderStage functionStage,
const PipelineLayout* layout) const {
- spirv_cross::CompilerMSL compiler(mSpirv);
-
- // If these options are changed, the values in DawnSPIRVCrossMSLFastFuzzer.cpp need to be
- // updated.
- spirv_cross::CompilerMSL::Options options_msl;
-
- // Disable PointSize builtin for https://bugs.chromium.org/p/dawn/issues/detail?id=146
- // Because Metal will reject PointSize builtin if the shader is compiled into a render
- // pipeline that uses a non-point topology.
- // TODO (hao.x.li@intel.com): Remove this once WebGPU requires there is no
- // gl_PointSize builtin (https://github.com/gpuweb/gpuweb/issues/332).
- options_msl.enable_point_size_builtin = false;
-
- // Always use vertex buffer 30 (the last one in the vertex buffer table) to contain
- // the shader storage buffer lengths.
- options_msl.buffer_size_buffer_index = kBufferLengthBufferSlot;
-
- compiler.set_msl_options(options_msl);
+ std::unique_ptr<spirv_cross::CompilerMSL> compiler_impl;
+ spirv_cross::CompilerMSL* compiler;
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ // If these options are changed, the values in DawnSPIRVCrossGLSLFastFuzzer.cpp need to
+ // be updated.
+ shaderc_spvc::CompileOptions options;
+
+ // Disable PointSize builtin for https://bugs.chromium.org/p/dawn/issues/detail?id=146
+ // Because Metal will reject PointSize builtin if the shader is compiled into a render
+ // pipeline that uses a non-point topology.
+ // TODO (hao.x.li@intel.com): Remove this once WebGPU requires there is no
+ // gl_PointSize builtin (https://github.com/gpuweb/gpuweb/issues/332).
+ options.SetMSLEnablePointSizeBuiltIn(false);
+
+ // Always use vertex buffer 30 (the last one in the vertex buffer table) to contain
+ // the shader storage buffer lengths.
+ options.SetMSLBufferSizeBufferIndex(kBufferLengthBufferSlot);
+ mSpvcContext.InitializeForMsl(mSpirv.data(), mSpirv.size(), options);
+ // TODO(rharrison): Handle initialize failing
+
+ compiler = reinterpret_cast<spirv_cross::CompilerMSL*>(mSpvcContext.GetCompiler());
+ } else {
+ // If these options are changed, the values in DawnSPIRVCrossMSLFastFuzzer.cpp need to
+ // be updated.
+ spirv_cross::CompilerMSL::Options options_msl;
+
+ // Disable PointSize builtin for https://bugs.chromium.org/p/dawn/issues/detail?id=146
+ // Because Metal will reject PointSize builtin if the shader is compiled into a render
+ // pipeline that uses a non-point topology.
+ // TODO (hao.x.li@intel.com): Remove this once WebGPU requires there is no
+ // gl_PointSize builtin (https://github.com/gpuweb/gpuweb/issues/332).
+ options_msl.enable_point_size_builtin = false;
+
+ // Always use vertex buffer 30 (the last one in the vertex buffer table) to contain
+ // the shader storage buffer lengths.
+ options_msl.buffer_size_buffer_index = kBufferLengthBufferSlot;
+
+ compiler_impl = std::make_unique<spirv_cross::CompilerMSL>(mSpirv);
+ compiler = compiler_impl.get();
+ compiler->set_msl_options(options_msl);
+ }
// By default SPIRV-Cross will give MSL resources indices in increasing order.
// To make the MSL indices match the indices chosen in the PipelineLayout, we build
@@ -86,7 +136,7 @@ namespace dawn_native { namespace metal {
mslBinding.binding = binding;
mslBinding.msl_buffer = mslBinding.msl_texture = mslBinding.msl_sampler = index;
- compiler.add_msl_resource_binding(mslBinding);
+ compiler->add_msl_resource_binding(mslBinding);
}
}
}
@@ -95,14 +145,14 @@ namespace dawn_native { namespace metal {
{
spv::ExecutionModel executionModel = SpirvExecutionModelForStage(functionStage);
- auto size = compiler.get_entry_point(functionName, executionModel).workgroup_size;
+ auto size = compiler->get_entry_point(functionName, executionModel).workgroup_size;
result.localWorkgroupSize = MTLSizeMake(size.x, size.y, size.z);
}
{
// SPIRV-Cross also supports re-ordering attributes but it seems to do the correct thing
// by default.
- std::string msl = compiler.compile();
+ std::string msl = compiler->compile();
NSString* mslSource = [NSString stringWithFormat:@"%s", msl.c_str()];
auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
@@ -126,7 +176,7 @@ namespace dawn_native { namespace metal {
[library release];
}
- result.needsStorageBufferLength = compiler.needs_buffer_size_buffer();
+ result.needsStorageBufferLength = compiler->needs_buffer_size_buffer();
return result;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
index 92458a209fd..4a35e41dee8 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/SwapChainMTL.mm
@@ -38,7 +38,7 @@ namespace dawn_native { namespace metal {
DawnSwapChainNextTexture next = {};
DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
if (error) {
- GetDevice()->HandleError(dawn::ErrorType::Unknown, error);
+ GetDevice()->HandleError(wgpu::ErrorType::Unknown, error);
return nullptr;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
index 13c30f87613..13ba9867596 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.h
@@ -24,7 +24,7 @@ namespace dawn_native { namespace metal {
class Device;
- MTLPixelFormat MetalPixelFormat(dawn::TextureFormat format);
+ MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format);
MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase* device,
const TextureDescriptor* descriptor,
IOSurfaceRef ioSurface,
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
index 12bbc73fe7e..9d7701b329a 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
@@ -20,24 +20,24 @@
namespace dawn_native { namespace metal {
namespace {
- bool UsageNeedsTextureView(dawn::TextureUsage usage) {
- constexpr dawn::TextureUsage kUsageNeedsTextureView =
- dawn::TextureUsage::Storage | dawn::TextureUsage::Sampled;
+ bool UsageNeedsTextureView(wgpu::TextureUsage usage) {
+ constexpr wgpu::TextureUsage kUsageNeedsTextureView =
+ wgpu::TextureUsage::Storage | wgpu::TextureUsage::Sampled;
return usage & kUsageNeedsTextureView;
}
- MTLTextureUsage MetalTextureUsage(dawn::TextureUsage usage) {
+ MTLTextureUsage MetalTextureUsage(wgpu::TextureUsage usage) {
MTLTextureUsage result = MTLTextureUsageUnknown; // This is 0
- if (usage & (dawn::TextureUsage::Storage)) {
+ if (usage & (wgpu::TextureUsage::Storage)) {
result |= MTLTextureUsageShaderWrite | MTLTextureUsageShaderRead;
}
- if (usage & (dawn::TextureUsage::Sampled)) {
+ if (usage & (wgpu::TextureUsage::Sampled)) {
result |= MTLTextureUsageShaderRead;
}
- if (usage & (dawn::TextureUsage::OutputAttachment)) {
+ if (usage & (wgpu::TextureUsage::OutputAttachment)) {
result |= MTLTextureUsageRenderTarget;
}
@@ -48,11 +48,11 @@ namespace dawn_native { namespace metal {
return result;
}
- MTLTextureType MetalTextureType(dawn::TextureDimension dimension,
+ MTLTextureType MetalTextureType(wgpu::TextureDimension dimension,
unsigned int arrayLayers,
unsigned int sampleCount) {
switch (dimension) {
- case dawn::TextureDimension::e2D:
+ case wgpu::TextureDimension::e2D:
if (sampleCount > 1) {
ASSERT(arrayLayers == 1);
return MTLTextureType2DMultisample;
@@ -64,16 +64,16 @@ namespace dawn_native { namespace metal {
}
}
- MTLTextureType MetalTextureViewType(dawn::TextureViewDimension dimension,
+ MTLTextureType MetalTextureViewType(wgpu::TextureViewDimension dimension,
unsigned int sampleCount) {
switch (dimension) {
- case dawn::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2D:
return (sampleCount > 1) ? MTLTextureType2DMultisample : MTLTextureType2D;
- case dawn::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::e2DArray:
return MTLTextureType2DArray;
- case dawn::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::Cube:
return MTLTextureTypeCube;
- case dawn::TextureViewDimension::CubeArray:
+ case wgpu::TextureViewDimension::CubeArray:
return MTLTextureTypeCubeArray;
default:
UNREACHABLE();
@@ -96,8 +96,8 @@ namespace dawn_native { namespace metal {
}
switch (textureViewDescriptor->dimension) {
- case dawn::TextureViewDimension::Cube:
- case dawn::TextureViewDimension::CubeArray:
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
return true;
default:
break;
@@ -106,16 +106,16 @@ namespace dawn_native { namespace metal {
return false;
}
- ResultOrError<dawn::TextureFormat> GetFormatEquivalentToIOSurfaceFormat(uint32_t format) {
+ ResultOrError<wgpu::TextureFormat> GetFormatEquivalentToIOSurfaceFormat(uint32_t format) {
switch (format) {
case 'RGBA':
- return dawn::TextureFormat::RGBA8Unorm;
+ return wgpu::TextureFormat::RGBA8Unorm;
case 'BGRA':
- return dawn::TextureFormat::BGRA8Unorm;
+ return wgpu::TextureFormat::BGRA8Unorm;
case '2C08':
- return dawn::TextureFormat::RG8Unorm;
+ return wgpu::TextureFormat::RG8Unorm;
case 'L008':
- return dawn::TextureFormat::R8Unorm;
+ return wgpu::TextureFormat::R8Unorm;
default:
return DAWN_VALIDATION_ERROR("Unsupported IOSurface format");
}
@@ -130,118 +130,118 @@ namespace dawn_native { namespace metal {
#endif
}
- MTLPixelFormat MetalPixelFormat(dawn::TextureFormat format) {
+ MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format) {
switch (format) {
- case dawn::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Unorm:
return MTLPixelFormatR8Unorm;
- case dawn::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R8Snorm:
return MTLPixelFormatR8Snorm;
- case dawn::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Uint:
return MTLPixelFormatR8Uint;
- case dawn::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::R8Sint:
return MTLPixelFormatR8Sint;
- case dawn::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Uint:
return MTLPixelFormatR16Uint;
- case dawn::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Sint:
return MTLPixelFormatR16Sint;
- case dawn::TextureFormat::R16Float:
+ case wgpu::TextureFormat::R16Float:
return MTLPixelFormatR16Float;
- case dawn::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Unorm:
return MTLPixelFormatRG8Unorm;
- case dawn::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RG8Snorm:
return MTLPixelFormatRG8Snorm;
- case dawn::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Uint:
return MTLPixelFormatRG8Uint;
- case dawn::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::RG8Sint:
return MTLPixelFormatRG8Sint;
- case dawn::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Uint:
return MTLPixelFormatR32Uint;
- case dawn::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::R32Sint:
return MTLPixelFormatR32Sint;
- case dawn::TextureFormat::R32Float:
+ case wgpu::TextureFormat::R32Float:
return MTLPixelFormatR32Float;
- case dawn::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Uint:
return MTLPixelFormatRG16Uint;
- case dawn::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Sint:
return MTLPixelFormatRG16Sint;
- case dawn::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RG16Float:
return MTLPixelFormatRG16Float;
- case dawn::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8Unorm:
return MTLPixelFormatRGBA8Unorm;
- case dawn::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
return MTLPixelFormatRGBA8Unorm_sRGB;
- case dawn::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Snorm:
return MTLPixelFormatRGBA8Snorm;
- case dawn::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Uint:
return MTLPixelFormatRGBA8Uint;
- case dawn::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::RGBA8Sint:
return MTLPixelFormatRGBA8Sint;
- case dawn::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8Unorm:
return MTLPixelFormatBGRA8Unorm;
- case dawn::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
return MTLPixelFormatBGRA8Unorm_sRGB;
- case dawn::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RGB10A2Unorm:
return MTLPixelFormatRGB10A2Unorm;
- case dawn::TextureFormat::RG11B10Float:
+ case wgpu::TextureFormat::RG11B10Float:
return MTLPixelFormatRG11B10Float;
- case dawn::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Uint:
return MTLPixelFormatRG32Uint;
- case dawn::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RG32Sint:
return MTLPixelFormatRG32Sint;
- case dawn::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RG32Float:
return MTLPixelFormatRG32Float;
- case dawn::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Uint:
return MTLPixelFormatRGBA16Uint;
- case dawn::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Sint:
return MTLPixelFormatRGBA16Sint;
- case dawn::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA16Float:
return MTLPixelFormatRGBA16Float;
- case dawn::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Uint:
return MTLPixelFormatRGBA32Uint;
- case dawn::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::RGBA32Sint:
return MTLPixelFormatRGBA32Sint;
- case dawn::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGBA32Float:
return MTLPixelFormatRGBA32Float;
- case dawn::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth32Float:
return MTLPixelFormatDepth32Float;
- case dawn::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24Plus:
return MTLPixelFormatDepth32Float;
- case dawn::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
return MTLPixelFormatDepth32Float_Stencil8;
#if defined(DAWN_PLATFORM_MACOS)
- case dawn::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnorm:
return MTLPixelFormatBC1_RGBA;
- case dawn::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
return MTLPixelFormatBC1_RGBA_sRGB;
- case dawn::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
return MTLPixelFormatBC2_RGBA;
- case dawn::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
return MTLPixelFormatBC2_RGBA_sRGB;
- case dawn::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
return MTLPixelFormatBC3_RGBA;
- case dawn::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
return MTLPixelFormatBC3_RGBA_sRGB;
- case dawn::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
return MTLPixelFormatBC4_RSnorm;
- case dawn::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RUnorm:
return MTLPixelFormatBC4_RUnorm;
- case dawn::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
return MTLPixelFormatBC5_RGSnorm;
- case dawn::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGUnorm:
return MTLPixelFormatBC5_RGUnorm;
- case dawn::TextureFormat::BC6HRGBSfloat:
+ case wgpu::TextureFormat::BC6HRGBSfloat:
return MTLPixelFormatBC6H_RGBFloat;
- case dawn::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
return MTLPixelFormatBC6H_RGBUfloat;
- case dawn::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
return MTLPixelFormatBC7_RGBAUnorm;
- case dawn::TextureFormat::BC7RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
return MTLPixelFormatBC7_RGBAUnorm_sRGB;
#endif
@@ -261,7 +261,7 @@ namespace dawn_native { namespace metal {
return DAWN_VALIDATION_ERROR("IOSurface plane doesn't exist");
}
- if (descriptor->dimension != dawn::TextureDimension::e2D) {
+ if (descriptor->dimension != wgpu::TextureDimension::e2D) {
return DAWN_VALIDATION_ERROR("IOSurface texture must be 2D");
}
@@ -283,7 +283,7 @@ namespace dawn_native { namespace metal {
return DAWN_VALIDATION_ERROR("IOSurface size doesn't match descriptor");
}
- dawn::TextureFormat ioSurfaceFormat;
+ wgpu::TextureFormat ioSurfaceFormat;
DAWN_TRY_ASSIGN(ioSurfaceFormat,
GetFormatEquivalentToIOSurfaceFormat(IOSurfaceGetPixelFormat(ioSurface)));
if (descriptor->format != ioSurfaceFormat) {
@@ -343,8 +343,10 @@ namespace dawn_native { namespace metal {
}
void Texture::DestroyImpl() {
- [mMtlTexture release];
- mMtlTexture = nil;
+ if (GetTextureState() == TextureState::OwnedInternal) {
+ [mMtlTexture release];
+ mMtlTexture = nil;
+ }
}
id<MTLTexture> Texture::GetMTLTexture() {
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h
index 574036d6438..091d8284f0d 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h
@@ -21,7 +21,7 @@
namespace dawn_native { namespace metal {
- MTLCompareFunction ToMetalCompareFunction(dawn::CompareFunction compareFunction);
+ MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction);
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm
index 86210379521..190c0e9453e 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm
@@ -16,23 +16,23 @@
namespace dawn_native { namespace metal {
- MTLCompareFunction ToMetalCompareFunction(dawn::CompareFunction compareFunction) {
+ MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction) {
switch (compareFunction) {
- case dawn::CompareFunction::Never:
+ case wgpu::CompareFunction::Never:
return MTLCompareFunctionNever;
- case dawn::CompareFunction::Less:
+ case wgpu::CompareFunction::Less:
return MTLCompareFunctionLess;
- case dawn::CompareFunction::LessEqual:
+ case wgpu::CompareFunction::LessEqual:
return MTLCompareFunctionLessEqual;
- case dawn::CompareFunction::Greater:
+ case wgpu::CompareFunction::Greater:
return MTLCompareFunctionGreater;
- case dawn::CompareFunction::GreaterEqual:
+ case wgpu::CompareFunction::GreaterEqual:
return MTLCompareFunctionGreaterEqual;
- case dawn::CompareFunction::NotEqual:
+ case wgpu::CompareFunction::NotEqual:
return MTLCompareFunctionNotEqual;
- case dawn::CompareFunction::Equal:
+ case wgpu::CompareFunction::Equal:
return MTLCompareFunctionEqual;
- case dawn::CompareFunction::Always:
+ case wgpu::CompareFunction::Always:
return MTLCompareFunctionAlways;
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
index 08e39b2e60c..872d48674cc 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
@@ -103,7 +103,7 @@ namespace dawn_native { namespace null {
DAWN_TRY(IncrementMemoryUsage(descriptor->size));
return new Buffer(this, descriptor);
}
- CommandBufferBase* Device::CreateCommandBuffer(CommandEncoderBase* encoder,
+ CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) {
return new CommandBuffer(encoder, descriptor);
}
@@ -129,9 +129,22 @@ namespace dawn_native { namespace null {
const ShaderModuleDescriptor* descriptor) {
auto module = new ShaderModule(this, descriptor);
- spirv_cross::Compiler compiler(descriptor->code, descriptor->codeSize);
- module->ExtractSpirvInfo(compiler);
-
+ if (IsToggleEnabled(Toggle::UseSpvc)) {
+ shaderc_spvc::CompileOptions options;
+ shaderc_spvc::Context context;
+ shaderc_spvc_status status =
+ context.InitializeForGlsl(descriptor->code, descriptor->codeSize, options);
+ if (status != shaderc_spvc_status_success) {
+ return DAWN_VALIDATION_ERROR("Unable to initialize instance of spvc");
+ }
+
+ spirv_cross::Compiler* compiler =
+ reinterpret_cast<spirv_cross::Compiler*>(context.GetCompiler());
+ module->ExtractSpirvInfo(*compiler);
+ } else {
+ spirv_cross::Compiler compiler(descriptor->code, descriptor->codeSize);
+ module->ExtractSpirvInfo(compiler);
+ }
return module;
}
ResultOrError<SwapChainBase*> Device::CreateSwapChainImpl(
@@ -241,7 +254,7 @@ namespace dawn_native { namespace null {
bool Buffer::IsMapWritable() const {
// Only return true for mappable buffers so we can test cases that need / don't need a
// staging buffer.
- return (GetUsage() & (dawn::BufferUsage::MapRead | dawn::BufferUsage::MapWrite)) != 0;
+ return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
}
MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
@@ -251,9 +264,9 @@ namespace dawn_native { namespace null {
void Buffer::MapOperationCompleted(uint32_t serial, void* ptr, bool isWrite) {
if (isWrite) {
- CallMapWriteCallback(serial, DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS, ptr, GetSize());
+ CallMapWriteCallback(serial, WGPUBufferMapAsyncStatus_Success, ptr, GetSize());
} else {
- CallMapReadCallback(serial, DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS, ptr, GetSize());
+ CallMapReadCallback(serial, WGPUBufferMapAsyncStatus_Success, ptr, GetSize());
}
}
@@ -302,8 +315,7 @@ namespace dawn_native { namespace null {
// CommandBuffer
- CommandBuffer::CommandBuffer(CommandEncoderBase* encoder,
- const CommandBufferDescriptor* descriptor)
+ CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
: CommandBufferBase(encoder, descriptor), mCommands(encoder->AcquireCommands()) {
}
@@ -348,8 +360,8 @@ namespace dawn_native { namespace null {
void NativeSwapChainImpl::Init(WSIContext* context) {
}
- DawnSwapChainError NativeSwapChainImpl::Configure(DawnTextureFormat format,
- DawnTextureUsage,
+ DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+ WGPUTextureUsage,
uint32_t width,
uint32_t height) {
return DAWN_SWAP_CHAIN_NO_ERROR;
@@ -363,8 +375,8 @@ namespace dawn_native { namespace null {
return DAWN_SWAP_CHAIN_NO_ERROR;
}
- dawn::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
- return dawn::TextureFormat::RGBA8Unorm;
+ wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+ return wgpu::TextureFormat::RGBA8Unorm;
}
// StagingBuffer
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
index ef98719223e..82b37bc798b 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
+++ b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
@@ -86,7 +86,7 @@ namespace dawn_native { namespace null {
Device(Adapter* adapter, const DeviceDescriptor* descriptor);
~Device();
- CommandBufferBase* CreateCommandBuffer(CommandEncoderBase* encoder,
+ CommandBufferBase* CreateCommandBuffer(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) override;
Serial GetCompletedCommandSerial() const final override;
@@ -178,7 +178,7 @@ namespace dawn_native { namespace null {
class CommandBuffer : public CommandBufferBase {
public:
- CommandBuffer(CommandEncoderBase* encoder, const CommandBufferDescriptor* descriptor);
+ CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
~CommandBuffer();
private:
@@ -208,13 +208,13 @@ namespace dawn_native { namespace null {
public:
using WSIContext = struct {};
void Init(WSIContext* context);
- DawnSwapChainError Configure(DawnTextureFormat format,
- DawnTextureUsage,
+ DawnSwapChainError Configure(WGPUTextureFormat format,
+ WGPUTextureUsage,
uint32_t width,
uint32_t height);
DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
DawnSwapChainError Present();
- dawn::TextureFormat GetPreferredFormat() const;
+ wgpu::TextureFormat GetPreferredFormat() const;
};
class StagingBuffer : public StagingBufferBase {
diff --git a/chromium/third_party/dawn/src/dawn_native/null/NullBackend.cpp b/chromium/third_party/dawn/src/dawn_native/null/NullBackend.cpp
index 14fff85850e..a48dcdccc4e 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/NullBackend.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/null/NullBackend.cpp
@@ -25,7 +25,7 @@ namespace dawn_native { namespace null {
DawnSwapChainImplementation CreateNativeSwapChainImpl() {
DawnSwapChainImplementation impl;
impl = CreateSwapChainImplementation(new NativeSwapChainImpl());
- impl.textureUsage = DAWN_TEXTURE_USAGE_PRESENT;
+ impl.textureUsage = WGPUTextureUsage_Present;
return impl;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp
index 3f546914698..80311af20c7 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp
@@ -65,7 +65,7 @@ namespace dawn_native { namespace opengl {
// version of OpenGL that would let us map the buffer unsynchronized.
gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
void* data = gl.MapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY);
- CallMapReadCallback(serial, DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS, data, GetSize());
+ CallMapReadCallback(serial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
return {};
}
@@ -76,7 +76,7 @@ namespace dawn_native { namespace opengl {
// version of OpenGL that would let us map the buffer unsynchronized.
gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
void* data = gl.MapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
- CallMapWriteCallback(serial, DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS, data, GetSize());
+ CallMapWriteCallback(serial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
return {};
}
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
index 9e9dae2e637..3993d5f1071 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
@@ -36,96 +36,96 @@ namespace dawn_native { namespace opengl {
namespace {
- GLenum IndexFormatType(dawn::IndexFormat format) {
+ GLenum IndexFormatType(wgpu::IndexFormat format) {
switch (format) {
- case dawn::IndexFormat::Uint16:
+ case wgpu::IndexFormat::Uint16:
return GL_UNSIGNED_SHORT;
- case dawn::IndexFormat::Uint32:
+ case wgpu::IndexFormat::Uint32:
return GL_UNSIGNED_INT;
default:
UNREACHABLE();
}
}
- GLenum VertexFormatType(dawn::VertexFormat format) {
+ GLenum VertexFormatType(wgpu::VertexFormat format) {
switch (format) {
- case dawn::VertexFormat::UChar2:
- case dawn::VertexFormat::UChar4:
- case dawn::VertexFormat::UChar2Norm:
- case dawn::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::UChar2:
+ case wgpu::VertexFormat::UChar4:
+ case wgpu::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::UChar4Norm:
return GL_UNSIGNED_BYTE;
- case dawn::VertexFormat::Char2:
- case dawn::VertexFormat::Char4:
- case dawn::VertexFormat::Char2Norm:
- case dawn::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::Char2:
+ case wgpu::VertexFormat::Char4:
+ case wgpu::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::Char4Norm:
return GL_BYTE;
- case dawn::VertexFormat::UShort2:
- case dawn::VertexFormat::UShort4:
- case dawn::VertexFormat::UShort2Norm:
- case dawn::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::UShort2:
+ case wgpu::VertexFormat::UShort4:
+ case wgpu::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::UShort4Norm:
return GL_UNSIGNED_SHORT;
- case dawn::VertexFormat::Short2:
- case dawn::VertexFormat::Short4:
- case dawn::VertexFormat::Short2Norm:
- case dawn::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Short2:
+ case wgpu::VertexFormat::Short4:
+ case wgpu::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Short4Norm:
return GL_SHORT;
- case dawn::VertexFormat::Half2:
- case dawn::VertexFormat::Half4:
+ case wgpu::VertexFormat::Half2:
+ case wgpu::VertexFormat::Half4:
return GL_HALF_FLOAT;
- case dawn::VertexFormat::Float:
- case dawn::VertexFormat::Float2:
- case dawn::VertexFormat::Float3:
- case dawn::VertexFormat::Float4:
+ case wgpu::VertexFormat::Float:
+ case wgpu::VertexFormat::Float2:
+ case wgpu::VertexFormat::Float3:
+ case wgpu::VertexFormat::Float4:
return GL_FLOAT;
- case dawn::VertexFormat::UInt:
- case dawn::VertexFormat::UInt2:
- case dawn::VertexFormat::UInt3:
- case dawn::VertexFormat::UInt4:
+ case wgpu::VertexFormat::UInt:
+ case wgpu::VertexFormat::UInt2:
+ case wgpu::VertexFormat::UInt3:
+ case wgpu::VertexFormat::UInt4:
return GL_UNSIGNED_INT;
- case dawn::VertexFormat::Int:
- case dawn::VertexFormat::Int2:
- case dawn::VertexFormat::Int3:
- case dawn::VertexFormat::Int4:
+ case wgpu::VertexFormat::Int:
+ case wgpu::VertexFormat::Int2:
+ case wgpu::VertexFormat::Int3:
+ case wgpu::VertexFormat::Int4:
return GL_INT;
default:
UNREACHABLE();
}
}
- GLboolean VertexFormatIsNormalized(dawn::VertexFormat format) {
+ GLboolean VertexFormatIsNormalized(wgpu::VertexFormat format) {
switch (format) {
- case dawn::VertexFormat::UChar2Norm:
- case dawn::VertexFormat::UChar4Norm:
- case dawn::VertexFormat::Char2Norm:
- case dawn::VertexFormat::Char4Norm:
- case dawn::VertexFormat::UShort2Norm:
- case dawn::VertexFormat::UShort4Norm:
- case dawn::VertexFormat::Short2Norm:
- case dawn::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Short4Norm:
return GL_TRUE;
default:
return GL_FALSE;
}
}
- bool VertexFormatIsInt(dawn::VertexFormat format) {
+ bool VertexFormatIsInt(wgpu::VertexFormat format) {
switch (format) {
- case dawn::VertexFormat::UChar2:
- case dawn::VertexFormat::UChar4:
- case dawn::VertexFormat::Char2:
- case dawn::VertexFormat::Char4:
- case dawn::VertexFormat::UShort2:
- case dawn::VertexFormat::UShort4:
- case dawn::VertexFormat::Short2:
- case dawn::VertexFormat::Short4:
- case dawn::VertexFormat::UInt:
- case dawn::VertexFormat::UInt2:
- case dawn::VertexFormat::UInt3:
- case dawn::VertexFormat::UInt4:
- case dawn::VertexFormat::Int:
- case dawn::VertexFormat::Int2:
- case dawn::VertexFormat::Int3:
- case dawn::VertexFormat::Int4:
+ case wgpu::VertexFormat::UChar2:
+ case wgpu::VertexFormat::UChar4:
+ case wgpu::VertexFormat::Char2:
+ case wgpu::VertexFormat::Char4:
+ case wgpu::VertexFormat::UShort2:
+ case wgpu::VertexFormat::UShort4:
+ case wgpu::VertexFormat::Short2:
+ case wgpu::VertexFormat::Short4:
+ case wgpu::VertexFormat::UInt:
+ case wgpu::VertexFormat::UInt2:
+ case wgpu::VertexFormat::UInt3:
+ case wgpu::VertexFormat::UInt4:
+ case wgpu::VertexFormat::Int:
+ case wgpu::VertexFormat::Int2:
+ case wgpu::VertexFormat::Int3:
+ case wgpu::VertexFormat::Int4:
return true;
default:
return false;
@@ -133,9 +133,9 @@ namespace dawn_native { namespace opengl {
}
// Vertex buffers and index buffers are implemented as part of an OpenGL VAO that
- // corresponds to an VertexInput. On the contrary in Dawn they are part of the global state.
- // This means that we have to re-apply these buffers on an VertexInput change.
- class InputBufferTracker {
+ // corresponds to a VertexState. On the contrary in Dawn they are part of the global state.
+ // This means that we have to re-apply these buffers on a VertexState change.
+ class VertexStateBufferBindingTracker {
public:
void OnSetIndexBuffer(BufferBase* buffer) {
mIndexBufferDirty = true;
@@ -157,7 +157,7 @@ namespace dawn_native { namespace opengl {
}
mIndexBufferDirty = true;
- mDirtyVertexBuffers |= pipeline->GetInputsSetMask();
+ mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
mLastPipeline = pipeline;
}
@@ -168,30 +168,32 @@ namespace dawn_native { namespace opengl {
mIndexBufferDirty = false;
}
- for (uint32_t slot :
- IterateBitSet(mDirtyVertexBuffers & mLastPipeline->GetInputsSetMask())) {
+ for (uint32_t slot : IterateBitSet(mDirtyVertexBuffers &
+ mLastPipeline->GetVertexBufferSlotsUsed())) {
for (uint32_t location :
- IterateBitSet(mLastPipeline->GetAttributesUsingInput(slot))) {
- auto attribute = mLastPipeline->GetAttribute(location);
+ IterateBitSet(mLastPipeline->GetAttributesUsingVertexBuffer(slot))) {
+ const VertexAttributeInfo& attribute =
+ mLastPipeline->GetAttribute(location);
GLuint buffer = mVertexBuffers[slot]->GetHandle();
uint64_t offset = mVertexBufferOffsets[slot];
- auto input = mLastPipeline->GetInput(slot);
- auto components = VertexFormatNumComponents(attribute.format);
- auto formatType = VertexFormatType(attribute.format);
+ const VertexBufferInfo& vertexBuffer = mLastPipeline->GetVertexBuffer(slot);
+ uint32_t components = VertexFormatNumComponents(attribute.format);
+ GLenum formatType = VertexFormatType(attribute.format);
GLboolean normalized = VertexFormatIsNormalized(attribute.format);
gl.BindBuffer(GL_ARRAY_BUFFER, buffer);
if (VertexFormatIsInt(attribute.format)) {
- gl.VertexAttribIPointer(location, components, formatType, input.stride,
- reinterpret_cast<void*>(static_cast<intptr_t>(
- offset + attribute.offset)));
- } else {
- gl.VertexAttribPointer(
- location, components, formatType, normalized, input.stride,
+ gl.VertexAttribIPointer(
+ location, components, formatType, vertexBuffer.arrayStride,
reinterpret_cast<void*>(
static_cast<intptr_t>(offset + attribute.offset)));
+ } else {
+ gl.VertexAttribPointer(location, components, formatType, normalized,
+ vertexBuffer.arrayStride,
+ reinterpret_cast<void*>(static_cast<intptr_t>(
+ offset + attribute.offset)));
}
}
}
@@ -210,7 +212,7 @@ namespace dawn_native { namespace opengl {
RenderPipelineBase* mLastPipeline = nullptr;
};
- class BindGroupTracker : public BindGroupTrackerBase<BindGroupBase*, false> {
+ class BindGroupTracker : public BindGroupTrackerBase<false, uint64_t> {
public:
void OnSetPipeline(RenderPipeline* pipeline) {
BindGroupTrackerBase::OnSetPipeline(pipeline);
@@ -242,7 +244,7 @@ namespace dawn_native { namespace opengl {
for (uint32_t bindingIndex : IterateBitSet(layout.mask)) {
switch (layout.types[bindingIndex]) {
- case dawn::BindingType::UniformBuffer: {
+ case wgpu::BindingType::UniformBuffer: {
BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
GLuint buffer = ToBackend(binding.buffer)->GetHandle();
GLuint uboIndex = indices[bindingIndex];
@@ -257,7 +259,7 @@ namespace dawn_native { namespace opengl {
binding.size);
} break;
- case dawn::BindingType::Sampler: {
+ case wgpu::BindingType::Sampler: {
Sampler* sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
GLuint samplerIndex = indices[bindingIndex];
@@ -273,7 +275,7 @@ namespace dawn_native { namespace opengl {
}
} break;
- case dawn::BindingType::SampledTexture: {
+ case wgpu::BindingType::SampledTexture: {
TextureView* view =
ToBackend(group->GetBindingAsTextureView(bindingIndex));
GLuint handle = view->GetHandle();
@@ -286,7 +288,7 @@ namespace dawn_native { namespace opengl {
}
} break;
- case dawn::BindingType::StorageBuffer: {
+ case wgpu::BindingType::StorageBuffer: {
BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
GLuint buffer = ToBackend(binding.buffer)->GetHandle();
GLuint ssboIndex = indices[bindingIndex];
@@ -301,8 +303,8 @@ namespace dawn_native { namespace opengl {
binding.size);
} break;
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
UNREACHABLE();
break;
@@ -392,8 +394,7 @@ namespace dawn_native { namespace opengl {
} // namespace
- CommandBuffer::CommandBuffer(CommandEncoderBase* encoder,
- const CommandBufferDescriptor* descriptor)
+ CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
: CommandBufferBase(encoder, descriptor), mCommands(encoder->AcquireCommands()) {
}
@@ -410,7 +411,7 @@ namespace dawn_native { namespace opengl {
// We count the lazy clears for non output attachment textures in order to match the
// backdoor lazy clear counts in Vulkan and D3D12.
bool isLazyClear =
- !(usages.textureUsages[i] & dawn::TextureUsage::OutputAttachment);
+ !(usages.textureUsages[i] & wgpu::TextureUsage::OutputAttachment);
texture->EnsureSubresourceContentInitialized(
0, texture->GetNumMipLevels(), 0, texture->GetArrayLayers(), isLazyClear);
}
@@ -483,7 +484,7 @@ namespace dawn_native { namespace opengl {
gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, formatInfo.blockHeight);
gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 1);
- ASSERT(texture->GetDimension() == dawn::TextureDimension::e2D);
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
uint64_t copyDataSize = (copySize.width / formatInfo.blockWidth) *
(copySize.height / formatInfo.blockHeight) *
formatInfo.blockByteSize;
@@ -503,7 +504,7 @@ namespace dawn_native { namespace opengl {
}
} else {
switch (texture->GetDimension()) {
- case dawn::TextureDimension::e2D:
+ case wgpu::TextureDimension::e2D:
if (texture->GetArrayLayers() > 1) {
gl.TexSubImage3D(target, dst.mipLevel, dst.origin.x,
dst.origin.y, dst.arrayLayer, copySize.width,
@@ -556,7 +557,7 @@ namespace dawn_native { namespace opengl {
gl.GenFramebuffers(1, &readFBO);
gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
switch (texture->GetDimension()) {
- case dawn::TextureDimension::e2D:
+ case wgpu::TextureDimension::e2D:
if (texture->GetArrayLayers() > 1) {
gl.FramebufferTextureLayer(
GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, texture->GetHandle(),
@@ -666,9 +667,9 @@ namespace dawn_native { namespace opengl {
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
- uint64_t* dynamicOffsets = nullptr;
+ uint32_t* dynamicOffsets = nullptr;
if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = mCommands.NextData<uint64_t>(cmd->dynamicOffsetCount);
+ dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
}
bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
cmd->dynamicOffsetCount, dynamicOffsets);
@@ -785,19 +786,19 @@ namespace dawn_native { namespace opengl {
// componentType: things work for now because the clear color is always a float, but
// when that's fixed will lose precision on integer formats when converting to
// float.
- if (attachmentInfo->loadOp == dawn::LoadOp::Clear) {
+ if (attachmentInfo->loadOp == wgpu::LoadOp::Clear) {
gl.ColorMaski(i, true, true, true, true);
gl.ClearBufferfv(GL_COLOR, i, &attachmentInfo->clearColor.r);
}
switch (attachmentInfo->storeOp) {
- case dawn::StoreOp::Store: {
+ case wgpu::StoreOp::Store: {
view->GetTexture()->SetIsSubresourceContentInitialized(
true, view->GetBaseMipLevel(), view->GetLevelCount(),
view->GetBaseArrayLayer(), view->GetLayerCount());
} break;
- case dawn::StoreOp::Clear: {
+ case wgpu::StoreOp::Clear: {
// TODO(natlee@microsoft.com): call glDiscard to do optimization
view->GetTexture()->SetIsSubresourceContentInitialized(
false, view->GetBaseMipLevel(), view->GetLevelCount(),
@@ -817,9 +818,9 @@ namespace dawn_native { namespace opengl {
// Load op - depth/stencil
bool doDepthClear = attachmentFormat.HasDepth() &&
- (attachmentInfo->depthLoadOp == dawn::LoadOp::Clear);
+ (attachmentInfo->depthLoadOp == wgpu::LoadOp::Clear);
bool doStencilClear = attachmentFormat.HasStencil() &&
- (attachmentInfo->stencilLoadOp == dawn::LoadOp::Clear);
+ (attachmentInfo->stencilLoadOp == wgpu::LoadOp::Clear);
if (doDepthClear) {
gl.DepthMask(GL_TRUE);
@@ -838,13 +839,13 @@ namespace dawn_native { namespace opengl {
gl.ClearBufferiv(GL_STENCIL, 0, &clearStencil);
}
- if (attachmentInfo->depthStoreOp == dawn::StoreOp::Store &&
- attachmentInfo->stencilStoreOp == dawn::StoreOp::Store) {
+ if (attachmentInfo->depthStoreOp == wgpu::StoreOp::Store &&
+ attachmentInfo->stencilStoreOp == wgpu::StoreOp::Store) {
view->GetTexture()->SetIsSubresourceContentInitialized(
true, view->GetBaseMipLevel(), view->GetLevelCount(),
view->GetBaseArrayLayer(), view->GetLayerCount());
- } else if (attachmentInfo->depthStoreOp == dawn::StoreOp::Clear &&
- attachmentInfo->stencilStoreOp == dawn::StoreOp::Clear) {
+ } else if (attachmentInfo->depthStoreOp == wgpu::StoreOp::Clear &&
+ attachmentInfo->stencilStoreOp == wgpu::StoreOp::Clear) {
view->GetTexture()->SetIsSubresourceContentInitialized(
false, view->GetBaseMipLevel(), view->GetLevelCount(),
view->GetBaseArrayLayer(), view->GetLayerCount());
@@ -855,14 +856,14 @@ namespace dawn_native { namespace opengl {
RenderPipeline* lastPipeline = nullptr;
uint64_t indexBufferBaseOffset = 0;
- InputBufferTracker inputBuffers;
+ VertexStateBufferBindingTracker vertexStateBufferBindingTracker;
BindGroupTracker bindGroupTracker = {};
auto DoRenderBundleCommand = [&](CommandIterator* iter, Command type) {
switch (type) {
case Command::Draw: {
DrawCmd* draw = iter->NextCommand<DrawCmd>();
- inputBuffers.Apply(gl);
+ vertexStateBufferBindingTracker.Apply(gl);
bindGroupTracker.Apply(gl);
if (draw->firstInstance > 0) {
@@ -879,11 +880,11 @@ namespace dawn_native { namespace opengl {
case Command::DrawIndexed: {
DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
- inputBuffers.Apply(gl);
+ vertexStateBufferBindingTracker.Apply(gl);
bindGroupTracker.Apply(gl);
- dawn::IndexFormat indexFormat =
- lastPipeline->GetVertexInputDescriptor()->indexFormat;
+ wgpu::IndexFormat indexFormat =
+ lastPipeline->GetVertexStateDescriptor()->indexFormat;
size_t formatSize = IndexFormatSize(indexFormat);
GLenum formatType = IndexFormatType(indexFormat);
@@ -905,7 +906,7 @@ namespace dawn_native { namespace opengl {
case Command::DrawIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- inputBuffers.Apply(gl);
+ vertexStateBufferBindingTracker.Apply(gl);
bindGroupTracker.Apply(gl);
uint64_t indirectBufferOffset = draw->indirectOffset;
@@ -919,11 +920,11 @@ namespace dawn_native { namespace opengl {
case Command::DrawIndexedIndirect: {
DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
- inputBuffers.Apply(gl);
+ vertexStateBufferBindingTracker.Apply(gl);
bindGroupTracker.Apply(gl);
- dawn::IndexFormat indexFormat =
- lastPipeline->GetVertexInputDescriptor()->indexFormat;
+ wgpu::IndexFormat indexFormat =
+ lastPipeline->GetVertexStateDescriptor()->indexFormat;
GLenum formatType = IndexFormatType(indexFormat);
uint64_t indirectBufferOffset = draw->indirectOffset;
@@ -948,15 +949,15 @@ namespace dawn_native { namespace opengl {
lastPipeline = ToBackend(cmd->pipeline).Get();
lastPipeline->ApplyNow(persistentPipelineState);
- inputBuffers.OnSetPipeline(lastPipeline);
+ vertexStateBufferBindingTracker.OnSetPipeline(lastPipeline);
bindGroupTracker.OnSetPipeline(lastPipeline);
} break;
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
- uint64_t* dynamicOffsets = nullptr;
+ uint32_t* dynamicOffsets = nullptr;
if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = iter->NextData<uint64_t>(cmd->dynamicOffsetCount);
+ dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
cmd->dynamicOffsetCount, dynamicOffsets);
@@ -965,12 +966,13 @@ namespace dawn_native { namespace opengl {
case Command::SetIndexBuffer: {
SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
indexBufferBaseOffset = cmd->offset;
- inputBuffers.OnSetIndexBuffer(cmd->buffer.Get());
+ vertexStateBufferBindingTracker.OnSetIndexBuffer(cmd->buffer.Get());
} break;
case Command::SetVertexBuffer: {
SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
- inputBuffers.OnSetVertexBuffer(cmd->slot, cmd->buffer.Get(), cmd->offset);
+ vertexStateBufferBindingTracker.OnSetVertexBuffer(cmd->slot, cmd->buffer.Get(),
+ cmd->offset);
} break;
default:
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h
index 45fb7e02251..89bde6e1c92 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h
@@ -28,7 +28,7 @@ namespace dawn_native { namespace opengl {
class CommandBuffer : public CommandBufferBase {
public:
- CommandBuffer(CommandEncoderBase* encoder, const CommandBufferDescriptor* descriptor);
+ CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
~CommandBuffer();
void Execute();
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
index a306e743b57..9c252ef6b6f 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
@@ -75,7 +75,7 @@ namespace dawn_native { namespace opengl {
ResultOrError<BufferBase*> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
return new Buffer(this, descriptor);
}
- CommandBufferBase* Device::CreateCommandBuffer(CommandEncoderBase* encoder,
+ CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) {
return new CommandBuffer(encoder, descriptor);
}
@@ -99,7 +99,7 @@ namespace dawn_native { namespace opengl {
}
ResultOrError<ShaderModuleBase*> Device::CreateShaderModuleImpl(
const ShaderModuleDescriptor* descriptor) {
- return new ShaderModule(this, descriptor);
+ return ShaderModule::Create(this, descriptor);
}
ResultOrError<SwapChainBase*> Device::CreateSwapChainImpl(
const SwapChainDescriptor* descriptor) {
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
index 5bafeddfccc..757f27cd9d2 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
@@ -47,7 +47,7 @@ namespace dawn_native { namespace opengl {
void SubmitFenceSync();
// Dawn API
- CommandBufferBase* CreateCommandBuffer(CommandEncoderBase* encoder,
+ CommandBufferBase* CreateCommandBuffer(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) override;
Serial GetCompletedCommandSerial() const final override;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.cpp
index 460690c2b82..35b129f0aa7 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.cpp
@@ -21,7 +21,7 @@ namespace dawn_native { namespace opengl {
using Type = GLFormat::ComponentType;
- auto AddFormat = [&table](dawn::TextureFormat dawnFormat, GLenum internalFormat,
+ auto AddFormat = [&table](wgpu::TextureFormat dawnFormat, GLenum internalFormat,
GLenum format, GLenum type, Type componentType) {
size_t index = ComputeFormatIndex(dawnFormat);
ASSERT(index < table.size());
@@ -44,71 +44,71 @@ namespace dawn_native { namespace opengl {
// clang-format off
// 1 byte color formats
- AddFormat(dawn::TextureFormat::R8Unorm, GL_R8, GL_RED, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::R8Snorm, GL_R8_SNORM, GL_RED, GL_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::R8Uint, GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
- AddFormat(dawn::TextureFormat::R8Sint, GL_R8I, GL_RED_INTEGER, GL_BYTE, Type::Int);
+ AddFormat(wgpu::TextureFormat::R8Unorm, GL_R8, GL_RED, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::R8Snorm, GL_R8_SNORM, GL_RED, GL_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::R8Uint, GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
+ AddFormat(wgpu::TextureFormat::R8Sint, GL_R8I, GL_RED_INTEGER, GL_BYTE, Type::Int);
// 2 bytes color formats
- AddFormat(dawn::TextureFormat::R16Uint, GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
- AddFormat(dawn::TextureFormat::R16Sint, GL_R16I, GL_RED_INTEGER, GL_SHORT, Type::Int);
- AddFormat(dawn::TextureFormat::R16Float, GL_R16F, GL_RED, GL_HALF_FLOAT, Type::Float);
- AddFormat(dawn::TextureFormat::RG8Unorm, GL_RG8, GL_RG, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::RG8Snorm, GL_RG8_SNORM, GL_RG, GL_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::RG8Uint, GL_RG8UI, GL_RG_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
- AddFormat(dawn::TextureFormat::RG8Sint, GL_RG8I, GL_RG_INTEGER, GL_BYTE, Type::Int);
+ AddFormat(wgpu::TextureFormat::R16Uint, GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
+ AddFormat(wgpu::TextureFormat::R16Sint, GL_R16I, GL_RED_INTEGER, GL_SHORT, Type::Int);
+ AddFormat(wgpu::TextureFormat::R16Float, GL_R16F, GL_RED, GL_HALF_FLOAT, Type::Float);
+ AddFormat(wgpu::TextureFormat::RG8Unorm, GL_RG8, GL_RG, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::RG8Snorm, GL_RG8_SNORM, GL_RG, GL_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::RG8Uint, GL_RG8UI, GL_RG_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
+ AddFormat(wgpu::TextureFormat::RG8Sint, GL_RG8I, GL_RG_INTEGER, GL_BYTE, Type::Int);
// 4 bytes color formats
- AddFormat(dawn::TextureFormat::R32Uint, GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, Type::Uint);
- AddFormat(dawn::TextureFormat::R32Sint, GL_R32I, GL_RED_INTEGER, GL_INT, Type::Int);
- AddFormat(dawn::TextureFormat::R32Float, GL_R32F, GL_RED, GL_FLOAT, Type::Float);
- AddFormat(dawn::TextureFormat::RG16Uint, GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
- AddFormat(dawn::TextureFormat::RG16Sint, GL_RG16I, GL_RG_INTEGER, GL_SHORT, Type::Int);
- AddFormat(dawn::TextureFormat::RG16Float, GL_RG16F, GL_RG, GL_HALF_FLOAT, Type::Float);
- AddFormat(dawn::TextureFormat::RGBA8Unorm, GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::RGBA8UnormSrgb, GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::RGBA8Snorm, GL_RGBA8_SNORM, GL_RGBA, GL_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::RGBA8Uint, GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
- AddFormat(dawn::TextureFormat::RGBA8Sint, GL_RGBA8I, GL_RGBA_INTEGER, GL_BYTE, Type::Int);
+ AddFormat(wgpu::TextureFormat::R32Uint, GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, Type::Uint);
+ AddFormat(wgpu::TextureFormat::R32Sint, GL_R32I, GL_RED_INTEGER, GL_INT, Type::Int);
+ AddFormat(wgpu::TextureFormat::R32Float, GL_R32F, GL_RED, GL_FLOAT, Type::Float);
+ AddFormat(wgpu::TextureFormat::RG16Uint, GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
+ AddFormat(wgpu::TextureFormat::RG16Sint, GL_RG16I, GL_RG_INTEGER, GL_SHORT, Type::Int);
+ AddFormat(wgpu::TextureFormat::RG16Float, GL_RG16F, GL_RG, GL_HALF_FLOAT, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGBA8Unorm, GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGBA8UnormSrgb, GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGBA8Snorm, GL_RGBA8_SNORM, GL_RGBA, GL_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGBA8Uint, GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, Type::Uint);
+ AddFormat(wgpu::TextureFormat::RGBA8Sint, GL_RGBA8I, GL_RGBA_INTEGER, GL_BYTE, Type::Int);
// This doesn't have an enum for the internal format in OpenGL, so use RGBA8.
- AddFormat(dawn::TextureFormat::BGRA8Unorm, GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::RGB10A2Unorm, GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, Type::Float);
- AddFormat(dawn::TextureFormat::RG11B10Float, GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, Type::Float);
+ AddFormat(wgpu::TextureFormat::BGRA8Unorm, GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGB10A2Unorm, GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, Type::Float);
+ AddFormat(wgpu::TextureFormat::RG11B10Float, GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, Type::Float);
// 8 bytes color formats
- AddFormat(dawn::TextureFormat::RG32Uint, GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, Type::Uint);
- AddFormat(dawn::TextureFormat::RG32Sint, GL_RG32I, GL_RG_INTEGER, GL_INT, Type::Int);
- AddFormat(dawn::TextureFormat::RG32Float, GL_RG32F, GL_RG, GL_FLOAT, Type::Float);
- AddFormat(dawn::TextureFormat::RGBA16Uint, GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
- AddFormat(dawn::TextureFormat::RGBA16Sint, GL_RGBA16I, GL_RGBA_INTEGER, GL_SHORT, Type::Int);
- AddFormat(dawn::TextureFormat::RGBA16Float, GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, Type::Float);
+ AddFormat(wgpu::TextureFormat::RG32Uint, GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, Type::Uint);
+ AddFormat(wgpu::TextureFormat::RG32Sint, GL_RG32I, GL_RG_INTEGER, GL_INT, Type::Int);
+ AddFormat(wgpu::TextureFormat::RG32Float, GL_RG32F, GL_RG, GL_FLOAT, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGBA16Uint, GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, Type::Uint);
+ AddFormat(wgpu::TextureFormat::RGBA16Sint, GL_RGBA16I, GL_RGBA_INTEGER, GL_SHORT, Type::Int);
+ AddFormat(wgpu::TextureFormat::RGBA16Float, GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, Type::Float);
// 16 bytes color formats
- AddFormat(dawn::TextureFormat::RGBA32Uint, GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, Type::Uint);
- AddFormat(dawn::TextureFormat::RGBA32Sint, GL_RGBA32I, GL_RGBA_INTEGER, GL_INT, Type::Int);
- AddFormat(dawn::TextureFormat::RGBA32Float, GL_RGBA32F, GL_RGBA, GL_FLOAT, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGBA32Uint, GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, Type::Uint);
+ AddFormat(wgpu::TextureFormat::RGBA32Sint, GL_RGBA32I, GL_RGBA_INTEGER, GL_INT, Type::Int);
+ AddFormat(wgpu::TextureFormat::RGBA32Float, GL_RGBA32F, GL_RGBA, GL_FLOAT, Type::Float);
// Depth stencil formats
- AddFormat(dawn::TextureFormat::Depth32Float, GL_DEPTH_COMPONENT32F, GL_DEPTH, GL_FLOAT, Type::DepthStencil);
- AddFormat(dawn::TextureFormat::Depth24Plus, GL_DEPTH_COMPONENT32F, GL_DEPTH, GL_FLOAT, Type::DepthStencil);
- AddFormat(dawn::TextureFormat::Depth24PlusStencil8, GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV, Type::DepthStencil);
+ AddFormat(wgpu::TextureFormat::Depth32Float, GL_DEPTH_COMPONENT32F, GL_DEPTH, GL_FLOAT, Type::DepthStencil);
+ AddFormat(wgpu::TextureFormat::Depth24Plus, GL_DEPTH_COMPONENT32F, GL_DEPTH, GL_FLOAT, Type::DepthStencil);
+ AddFormat(wgpu::TextureFormat::Depth24PlusStencil8, GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV, Type::DepthStencil);
// Block compressed formats
- AddFormat(dawn::TextureFormat::BC1RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::BC1RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::BC2RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::BC2RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::BC3RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::BC3RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::BC4RSnorm, GL_COMPRESSED_SIGNED_RED_RGTC1, GL_RED, GL_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::BC4RUnorm, GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::BC5RGSnorm, GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::BC5RGUnorm, GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::BC6HRGBSfloat, GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_HALF_FLOAT, Type::Float);
- AddFormat(dawn::TextureFormat::BC6HRGBUfloat, GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_HALF_FLOAT, Type::Float);
- AddFormat(dawn::TextureFormat::BC7RGBAUnorm, GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
- AddFormat(dawn::TextureFormat::BC7RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC1RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC1RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC2RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC2RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC3RGBAUnorm, GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC3RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC4RSnorm, GL_COMPRESSED_SIGNED_RED_RGTC1, GL_RED, GL_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC4RUnorm, GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC5RGSnorm, GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC5RGUnorm, GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC6HRGBSfloat, GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_HALF_FLOAT, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC6HRGBUfloat, GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_HALF_FLOAT, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC7RGBAUnorm, GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
+ AddFormat(wgpu::TextureFormat::BC7RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
// clang-format on
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.cpp
index f1078754915..3cfdad4f367 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.cpp
@@ -42,11 +42,11 @@ namespace dawn_native { namespace opengl {
mBackTexture, 0);
}
- DawnSwapChainError NativeSwapChainImpl::Configure(DawnTextureFormat format,
- DawnTextureUsage usage,
+ DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+ WGPUTextureUsage usage,
uint32_t width,
uint32_t height) {
- if (format != DAWN_TEXTURE_FORMAT_RGBA8_UNORM) {
+ if (format != WGPUTextureFormat_RGBA8Unorm) {
return "unsupported format";
}
ASSERT(width > 0);
@@ -80,8 +80,8 @@ namespace dawn_native { namespace opengl {
return DAWN_SWAP_CHAIN_NO_ERROR;
}
- dawn::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
- return dawn::TextureFormat::RGBA8Unorm;
+ wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+ return wgpu::TextureFormat::RGBA8Unorm;
}
}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.h
index 81a5dc9d86c..acda00576f8 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/NativeSwapChainImplGL.h
@@ -32,14 +32,14 @@ namespace dawn_native { namespace opengl {
~NativeSwapChainImpl();
void Init(DawnWSIContextGL* context);
- DawnSwapChainError Configure(DawnTextureFormat format,
- DawnTextureUsage,
+ DawnSwapChainError Configure(WGPUTextureFormat format,
+ WGPUTextureUsage,
uint32_t width,
uint32_t height);
DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
DawnSwapChainError Present();
- dawn::TextureFormat GetPreferredFormat() const;
+ wgpu::TextureFormat GetPreferredFormat() const;
private:
PresentCallback mPresentCallback;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLBackend.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLBackend.cpp
index 91b019f2131..fbab4152543 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLBackend.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/OpenGLBackend.cpp
@@ -27,7 +27,7 @@ namespace dawn_native { namespace opengl {
: AdapterDiscoveryOptionsBase(BackendType::OpenGL) {
}
- DawnSwapChainImplementation CreateNativeSwapChainImpl(DawnDevice device,
+ DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
PresentCallback present,
void* presentUserdata) {
Device* backendDevice = reinterpret_cast<Device*>(device);
@@ -35,15 +35,15 @@ namespace dawn_native { namespace opengl {
DawnSwapChainImplementation impl;
impl = CreateSwapChainImplementation(
new NativeSwapChainImpl(backendDevice, present, presentUserdata));
- impl.textureUsage = DAWN_TEXTURE_USAGE_PRESENT;
+ impl.textureUsage = WGPUTextureUsage_Present;
return impl;
}
- DawnTextureFormat GetNativeSwapChainPreferredFormat(
+ WGPUTextureFormat GetNativeSwapChainPreferredFormat(
const DawnSwapChainImplementation* swapChain) {
NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
- return static_cast<DawnTextureFormat>(impl->GetPreferredFormat());
+ return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
}
}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
index 57d72643379..d76b091fd6f 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.cpp
@@ -74,7 +74,7 @@ namespace dawn_native { namespace opengl {
mProgram = gl.CreateProgram();
- dawn::ShaderStage activeStages = dawn::ShaderStage::None;
+ wgpu::ShaderStage activeStages = wgpu::ShaderStage::None;
for (SingleShaderStage stage : IterateStages(kAllStages)) {
if (modules[stage] != nullptr) {
activeStages |= StageBit(stage);
@@ -118,14 +118,14 @@ namespace dawn_native { namespace opengl {
std::string name = GetBindingName(group, binding);
switch (groupInfo.types[binding]) {
- case dawn::BindingType::UniformBuffer: {
+ case wgpu::BindingType::UniformBuffer: {
GLint location = gl.GetUniformBlockIndex(mProgram, name.c_str());
if (location != -1) {
gl.UniformBlockBinding(mProgram, location, indices[group][binding]);
}
} break;
- case dawn::BindingType::StorageBuffer: {
+ case wgpu::BindingType::StorageBuffer: {
GLuint location = gl.GetProgramResourceIndex(
mProgram, GL_SHADER_STORAGE_BLOCK, name.c_str());
if (location != GL_INVALID_INDEX) {
@@ -134,14 +134,14 @@ namespace dawn_native { namespace opengl {
}
} break;
- case dawn::BindingType::Sampler:
- case dawn::BindingType::SampledTexture:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::SampledTexture:
// These binding types are handled in the separate sampler and texture
// emulation
break;
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
UNREACHABLE();
break;
@@ -177,11 +177,11 @@ namespace dawn_native { namespace opengl {
indices[combined.textureLocation.group][combined.textureLocation.binding];
mUnitsForTextures[textureIndex].push_back(textureUnit);
- dawn::TextureComponentType componentType =
+ wgpu::TextureComponentType componentType =
layout->GetBindGroupLayout(combined.textureLocation.group)
->GetBindingInfo()
.textureComponentTypes[combined.textureLocation.binding];
- bool shouldUseFiltering = componentType == dawn::TextureComponentType::Float;
+ bool shouldUseFiltering = componentType == wgpu::TextureComponentType::Float;
GLuint samplerIndex =
indices[combined.samplerLocation.group][combined.samplerLocation.binding];
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp
index 713c2bc1f2c..2884dfa5575 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp
@@ -36,26 +36,26 @@ namespace dawn_native { namespace opengl {
}
switch (groupInfo.types[binding]) {
- case dawn::BindingType::UniformBuffer:
+ case wgpu::BindingType::UniformBuffer:
mIndexInfo[group][binding] = uboIndex;
uboIndex++;
break;
- case dawn::BindingType::Sampler:
+ case wgpu::BindingType::Sampler:
mIndexInfo[group][binding] = samplerIndex;
samplerIndex++;
break;
- case dawn::BindingType::SampledTexture:
+ case wgpu::BindingType::SampledTexture:
mIndexInfo[group][binding] = sampledTextureIndex;
sampledTextureIndex++;
break;
- case dawn::BindingType::StorageBuffer:
+ case wgpu::BindingType::StorageBuffer:
mIndexInfo[group][binding] = ssboIndex;
ssboIndex++;
break;
- case dawn::BindingType::StorageTexture:
- case dawn::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::StorageTexture:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
UNREACHABLE();
break;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
index fde06c853a0..9e08f6ae458 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
@@ -16,6 +16,8 @@
#include "dawn_native/opengl/CommandBufferGL.h"
#include "dawn_native/opengl/DeviceGL.h"
+#include "dawn_platform/DawnPlatform.h"
+#include "dawn_platform/tracing/TraceEvent.h"
namespace dawn_native { namespace opengl {
@@ -25,9 +27,11 @@ namespace dawn_native { namespace opengl {
MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
Device* device = ToBackend(GetDevice());
+ TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
for (uint32_t i = 0; i < commandCount; ++i) {
ToBackend(commands[i])->Execute();
}
+ TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
device->SubmitFenceSync();
return {};
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
index efbe76184b4..b16783ce904 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
@@ -23,17 +23,17 @@ namespace dawn_native { namespace opengl {
namespace {
- GLenum GLPrimitiveTopology(dawn::PrimitiveTopology primitiveTopology) {
+ GLenum GLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
switch (primitiveTopology) {
- case dawn::PrimitiveTopology::PointList:
+ case wgpu::PrimitiveTopology::PointList:
return GL_POINTS;
- case dawn::PrimitiveTopology::LineList:
+ case wgpu::PrimitiveTopology::LineList:
return GL_LINES;
- case dawn::PrimitiveTopology::LineStrip:
+ case wgpu::PrimitiveTopology::LineStrip:
return GL_LINE_STRIP;
- case dawn::PrimitiveTopology::TriangleList:
+ case wgpu::PrimitiveTopology::TriangleList:
return GL_TRIANGLES;
- case dawn::PrimitiveTopology::TriangleStrip:
+ case wgpu::PrimitiveTopology::TriangleStrip:
return GL_TRIANGLE_STRIP;
default:
UNREACHABLE();
@@ -41,66 +41,66 @@ namespace dawn_native { namespace opengl {
}
void ApplyFrontFaceAndCulling(const OpenGLFunctions& gl,
- dawn::FrontFace face,
- dawn::CullMode mode) {
- if (mode == dawn::CullMode::None) {
+ wgpu::FrontFace face,
+ wgpu::CullMode mode) {
+ if (mode == wgpu::CullMode::None) {
gl.Disable(GL_CULL_FACE);
} else {
gl.Enable(GL_CULL_FACE);
// Note that we invert winding direction in OpenGL. Because Y axis is up in OpenGL,
// which is different from WebGPU and other backends (Y axis is down).
- GLenum direction = (face == dawn::FrontFace::CCW) ? GL_CW : GL_CCW;
+ GLenum direction = (face == wgpu::FrontFace::CCW) ? GL_CW : GL_CCW;
gl.FrontFace(direction);
- GLenum cullMode = (mode == dawn::CullMode::Front) ? GL_FRONT : GL_BACK;
+ GLenum cullMode = (mode == wgpu::CullMode::Front) ? GL_FRONT : GL_BACK;
gl.CullFace(cullMode);
}
}
- GLenum GLBlendFactor(dawn::BlendFactor factor, bool alpha) {
+ GLenum GLBlendFactor(wgpu::BlendFactor factor, bool alpha) {
switch (factor) {
- case dawn::BlendFactor::Zero:
+ case wgpu::BlendFactor::Zero:
return GL_ZERO;
- case dawn::BlendFactor::One:
+ case wgpu::BlendFactor::One:
return GL_ONE;
- case dawn::BlendFactor::SrcColor:
+ case wgpu::BlendFactor::SrcColor:
return GL_SRC_COLOR;
- case dawn::BlendFactor::OneMinusSrcColor:
+ case wgpu::BlendFactor::OneMinusSrcColor:
return GL_ONE_MINUS_SRC_COLOR;
- case dawn::BlendFactor::SrcAlpha:
+ case wgpu::BlendFactor::SrcAlpha:
return GL_SRC_ALPHA;
- case dawn::BlendFactor::OneMinusSrcAlpha:
+ case wgpu::BlendFactor::OneMinusSrcAlpha:
return GL_ONE_MINUS_SRC_ALPHA;
- case dawn::BlendFactor::DstColor:
+ case wgpu::BlendFactor::DstColor:
return GL_DST_COLOR;
- case dawn::BlendFactor::OneMinusDstColor:
+ case wgpu::BlendFactor::OneMinusDstColor:
return GL_ONE_MINUS_DST_COLOR;
- case dawn::BlendFactor::DstAlpha:
+ case wgpu::BlendFactor::DstAlpha:
return GL_DST_ALPHA;
- case dawn::BlendFactor::OneMinusDstAlpha:
+ case wgpu::BlendFactor::OneMinusDstAlpha:
return GL_ONE_MINUS_DST_ALPHA;
- case dawn::BlendFactor::SrcAlphaSaturated:
+ case wgpu::BlendFactor::SrcAlphaSaturated:
return GL_SRC_ALPHA_SATURATE;
- case dawn::BlendFactor::BlendColor:
+ case wgpu::BlendFactor::BlendColor:
return alpha ? GL_CONSTANT_ALPHA : GL_CONSTANT_COLOR;
- case dawn::BlendFactor::OneMinusBlendColor:
+ case wgpu::BlendFactor::OneMinusBlendColor:
return alpha ? GL_ONE_MINUS_CONSTANT_ALPHA : GL_ONE_MINUS_CONSTANT_COLOR;
default:
UNREACHABLE();
}
}
- GLenum GLBlendMode(dawn::BlendOperation operation) {
+ GLenum GLBlendMode(wgpu::BlendOperation operation) {
switch (operation) {
- case dawn::BlendOperation::Add:
+ case wgpu::BlendOperation::Add:
return GL_FUNC_ADD;
- case dawn::BlendOperation::Subtract:
+ case wgpu::BlendOperation::Subtract:
return GL_FUNC_SUBTRACT;
- case dawn::BlendOperation::ReverseSubtract:
+ case wgpu::BlendOperation::ReverseSubtract:
return GL_FUNC_REVERSE_SUBTRACT;
- case dawn::BlendOperation::Min:
+ case wgpu::BlendOperation::Min:
return GL_MIN;
- case dawn::BlendOperation::Max:
+ case wgpu::BlendOperation::Max:
return GL_MAX;
default:
UNREACHABLE();
@@ -122,29 +122,29 @@ namespace dawn_native { namespace opengl {
} else {
gl.Disablei(GL_BLEND, attachment);
}
- gl.ColorMaski(attachment, descriptor->writeMask & dawn::ColorWriteMask::Red,
- descriptor->writeMask & dawn::ColorWriteMask::Green,
- descriptor->writeMask & dawn::ColorWriteMask::Blue,
- descriptor->writeMask & dawn::ColorWriteMask::Alpha);
+ gl.ColorMaski(attachment, descriptor->writeMask & wgpu::ColorWriteMask::Red,
+ descriptor->writeMask & wgpu::ColorWriteMask::Green,
+ descriptor->writeMask & wgpu::ColorWriteMask::Blue,
+ descriptor->writeMask & wgpu::ColorWriteMask::Alpha);
}
- GLuint OpenGLStencilOperation(dawn::StencilOperation stencilOperation) {
+ GLuint OpenGLStencilOperation(wgpu::StencilOperation stencilOperation) {
switch (stencilOperation) {
- case dawn::StencilOperation::Keep:
+ case wgpu::StencilOperation::Keep:
return GL_KEEP;
- case dawn::StencilOperation::Zero:
+ case wgpu::StencilOperation::Zero:
return GL_ZERO;
- case dawn::StencilOperation::Replace:
+ case wgpu::StencilOperation::Replace:
return GL_REPLACE;
- case dawn::StencilOperation::Invert:
+ case wgpu::StencilOperation::Invert:
return GL_INVERT;
- case dawn::StencilOperation::IncrementClamp:
+ case wgpu::StencilOperation::IncrementClamp:
return GL_INCR;
- case dawn::StencilOperation::DecrementClamp:
+ case wgpu::StencilOperation::DecrementClamp:
return GL_DECR;
- case dawn::StencilOperation::IncrementWrap:
+ case wgpu::StencilOperation::IncrementWrap:
return GL_INCR_WRAP;
- case dawn::StencilOperation::DecrementWrap:
+ case wgpu::StencilOperation::DecrementWrap:
return GL_DECR_WRAP;
default:
UNREACHABLE();
@@ -155,7 +155,7 @@ namespace dawn_native { namespace opengl {
const DepthStencilStateDescriptor* descriptor,
PersistentPipelineState* persistentPipelineState) {
// Depth writes only occur if depth is enabled
- if (descriptor->depthCompare == dawn::CompareFunction::Always &&
+ if (descriptor->depthCompare == wgpu::CompareFunction::Always &&
!descriptor->depthWriteEnabled) {
gl.Disable(GL_DEPTH_TEST);
} else {
@@ -202,7 +202,7 @@ namespace dawn_native { namespace opengl {
modules[SingleShaderStage::Fragment] = ToBackend(descriptor->fragmentStage->module);
PipelineGL::Initialize(device->gl, ToBackend(GetLayout()), modules);
- CreateVAOForVertexInput(descriptor->vertexInput);
+ CreateVAOForVertexState(descriptor->vertexState);
}
RenderPipeline::~RenderPipeline() {
@@ -215,28 +215,28 @@ namespace dawn_native { namespace opengl {
return mGlPrimitiveTopology;
}
- void RenderPipeline::CreateVAOForVertexInput(const VertexInputDescriptor* vertexInput) {
+ void RenderPipeline::CreateVAOForVertexState(const VertexStateDescriptor* vertexState) {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
gl.GenVertexArrays(1, &mVertexArrayObject);
gl.BindVertexArray(mVertexArrayObject);
- for (uint32_t location : IterateBitSet(GetAttributesSetMask())) {
+ for (uint32_t location : IterateBitSet(GetAttributeLocationsUsed())) {
const auto& attribute = GetAttribute(location);
gl.EnableVertexAttribArray(location);
- attributesUsingInput[attribute.inputSlot][location] = true;
- auto input = GetInput(attribute.inputSlot);
+ attributesUsingVertexBuffer[attribute.vertexBufferSlot][location] = true;
+ const VertexBufferInfo& vertexBuffer = GetVertexBuffer(attribute.vertexBufferSlot);
- if (input.stride == 0) {
+ if (vertexBuffer.arrayStride == 0) {
// Emulate a stride of zero (constant vertex attribute) by
// setting the attribute instance divisor to a huge number.
gl.VertexAttribDivisor(location, 0xffffffff);
} else {
- switch (input.stepMode) {
- case dawn::InputStepMode::Vertex:
+ switch (vertexBuffer.stepMode) {
+ case wgpu::InputStepMode::Vertex:
break;
- case dawn::InputStepMode::Instance:
+ case wgpu::InputStepMode::Instance:
gl.VertexAttribDivisor(location, 1);
break;
default:
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h
index b0dc2e8dd25..b8bcb26d2c5 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.h
@@ -37,7 +37,7 @@ namespace dawn_native { namespace opengl {
void ApplyNow(PersistentPipelineState& persistentPipelineState);
private:
- void CreateVAOForVertexInput(const VertexInputDescriptor* vertexInput);
+ void CreateVAOForVertexState(const VertexStateDescriptor* vertexState);
// TODO(yunchao.he@intel.com): vao need to be deduplicated between pipelines.
GLuint mVertexArrayObject;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.cpp
index 17bf353ed51..aef7abbd009 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/SamplerGL.cpp
@@ -21,33 +21,33 @@
namespace dawn_native { namespace opengl {
namespace {
- GLenum MagFilterMode(dawn::FilterMode filter) {
+ GLenum MagFilterMode(wgpu::FilterMode filter) {
switch (filter) {
- case dawn::FilterMode::Nearest:
+ case wgpu::FilterMode::Nearest:
return GL_NEAREST;
- case dawn::FilterMode::Linear:
+ case wgpu::FilterMode::Linear:
return GL_LINEAR;
default:
UNREACHABLE();
}
}
- GLenum MinFilterMode(dawn::FilterMode minFilter, dawn::FilterMode mipMapFilter) {
+ GLenum MinFilterMode(wgpu::FilterMode minFilter, wgpu::FilterMode mipMapFilter) {
switch (minFilter) {
- case dawn::FilterMode::Nearest:
+ case wgpu::FilterMode::Nearest:
switch (mipMapFilter) {
- case dawn::FilterMode::Nearest:
+ case wgpu::FilterMode::Nearest:
return GL_NEAREST_MIPMAP_NEAREST;
- case dawn::FilterMode::Linear:
+ case wgpu::FilterMode::Linear:
return GL_NEAREST_MIPMAP_LINEAR;
default:
UNREACHABLE();
}
- case dawn::FilterMode::Linear:
+ case wgpu::FilterMode::Linear:
switch (mipMapFilter) {
- case dawn::FilterMode::Nearest:
+ case wgpu::FilterMode::Nearest:
return GL_LINEAR_MIPMAP_NEAREST;
- case dawn::FilterMode::Linear:
+ case wgpu::FilterMode::Linear:
return GL_LINEAR_MIPMAP_LINEAR;
default:
UNREACHABLE();
@@ -57,13 +57,13 @@ namespace dawn_native { namespace opengl {
}
}
- GLenum WrapMode(dawn::AddressMode mode) {
+ GLenum WrapMode(wgpu::AddressMode mode) {
switch (mode) {
- case dawn::AddressMode::Repeat:
+ case wgpu::AddressMode::Repeat:
return GL_REPEAT;
- case dawn::AddressMode::MirrorRepeat:
+ case wgpu::AddressMode::MirrorRepeat:
return GL_MIRRORED_REPEAT;
- case dawn::AddressMode::ClampToEdge:
+ case wgpu::AddressMode::ClampToEdge:
return GL_CLAMP_TO_EDGE;
default:
UNREACHABLE();
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
index 3059c582316..e1138420a04 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.cpp
@@ -47,50 +47,103 @@ namespace dawn_native { namespace opengl {
return o.str();
}
+ // static
+ ResultOrError<ShaderModule*> ShaderModule::Create(Device* device,
+ const ShaderModuleDescriptor* descriptor) {
+ std::unique_ptr<ShaderModule> module(new ShaderModule(device, descriptor));
+ if (!module)
+ return DAWN_VALIDATION_ERROR("Unable to create ShaderModule");
+ DAWN_TRY(module->Initialize(descriptor));
+ return module.release();
+ }
+
+ const char* ShaderModule::GetSource() const {
+ return mGlslSource.c_str();
+ }
+
+ const ShaderModule::CombinedSamplerInfo& ShaderModule::GetCombinedSamplerInfo() const {
+ return mCombinedInfo;
+ }
+
ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
: ShaderModuleBase(device, descriptor) {
- spirv_cross::CompilerGLSL compiler(descriptor->code, descriptor->codeSize);
- // If these options are changed, the values in DawnSPIRVCrossGLSLFastFuzzer.cpp need to be
- // updated.
- spirv_cross::CompilerGLSL::Options options;
-
- // The range of Z-coordinate in the clipping volume of OpenGL is [-w, w], while it is [0, w]
- // in D3D12, Metal and Vulkan, so we should normalize it in shaders in all backends.
- // See the documentation of spirv_cross::CompilerGLSL::Options::vertex::fixup_clipspace for
- // more details.
- options.vertex.flip_vert_y = true;
- options.vertex.fixup_clipspace = true;
-
- // TODO(cwallez@chromium.org): discover the backing context version and use that.
+ }
+
+ MaybeError ShaderModule::Initialize(const ShaderModuleDescriptor* descriptor) {
+ std::unique_ptr<spirv_cross::CompilerGLSL> compiler_impl;
+ spirv_cross::CompilerGLSL* compiler;
+
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ // If these options are changed, the values in DawnSPIRVCrossGLSLFastFuzzer.cpp need to
+ // be updated.
+ shaderc_spvc::CompileOptions options;
+
+ // The range of Z-coordinate in the clipping volume of OpenGL is [-w, w], while it is
+ // [0, w] in D3D12, Metal and Vulkan, so we should normalize it in shaders in all
+ // backends. See the documentation of
+ // spirv_cross::CompilerGLSL::Options::vertex::fixup_clipspace for more details.
+ options.SetFlipVertY(true);
+ options.SetFixupClipspace(true);
+
+ // TODO(cwallez@chromium.org): discover the backing context version and use that.
+#if defined(DAWN_PLATFORM_APPLE)
+ options.SetGLSLLanguageVersion(410);
+#else
+ options.SetGLSLLanguageVersion(440);
+#endif
+ shaderc_spvc_status status =
+ mSpvcContext.InitializeForGlsl(descriptor->code, descriptor->codeSize, options);
+ if (status != shaderc_spvc_status_success)
+ return DAWN_VALIDATION_ERROR("Unable to initialize instance of spvc");
+
+ compiler = reinterpret_cast<spirv_cross::CompilerGLSL*>(mSpvcContext.GetCompiler());
+ } else {
+ // If these options are changed, the values in DawnSPIRVCrossGLSLFastFuzzer.cpp need to
+ // be updated.
+ spirv_cross::CompilerGLSL::Options options;
+
+ // The range of Z-coordinate in the clipping volume of OpenGL is [-w, w], while it is
+ // [0, w] in D3D12, Metal and Vulkan, so we should normalize it in shaders in all
+ // backends. See the documentation of
+ // spirv_cross::CompilerGLSL::Options::vertex::fixup_clipspace for more details.
+ options.vertex.flip_vert_y = true;
+ options.vertex.fixup_clipspace = true;
+
+ // TODO(cwallez@chromium.org): discover the backing context version and use that.
#if defined(DAWN_PLATFORM_APPLE)
options.version = 410;
#else
options.version = 440;
#endif
- compiler.set_common_options(options);
- ExtractSpirvInfo(compiler);
+ compiler_impl =
+ std::make_unique<spirv_cross::CompilerGLSL>(descriptor->code, descriptor->codeSize);
+ compiler = compiler_impl.get();
+ compiler->set_common_options(options);
+ }
+
+ ExtractSpirvInfo(*compiler);
const auto& bindingInfo = GetBindingInfo();
// Extract bindings names so that it can be used to get its location in program.
// Now translate the separate sampler / textures into combined ones and store their info.
// We need to do this before removing the set and binding decorations.
- compiler.build_combined_image_samplers();
+ compiler->build_combined_image_samplers();
- for (const auto& combined : compiler.get_combined_image_samplers()) {
+ for (const auto& combined : compiler->get_combined_image_samplers()) {
mCombinedInfo.emplace_back();
auto& info = mCombinedInfo.back();
info.samplerLocation.group =
- compiler.get_decoration(combined.sampler_id, spv::DecorationDescriptorSet);
+ compiler->get_decoration(combined.sampler_id, spv::DecorationDescriptorSet);
info.samplerLocation.binding =
- compiler.get_decoration(combined.sampler_id, spv::DecorationBinding);
+ compiler->get_decoration(combined.sampler_id, spv::DecorationBinding);
info.textureLocation.group =
- compiler.get_decoration(combined.image_id, spv::DecorationDescriptorSet);
+ compiler->get_decoration(combined.image_id, spv::DecorationDescriptorSet);
info.textureLocation.binding =
- compiler.get_decoration(combined.image_id, spv::DecorationBinding);
- compiler.set_name(combined.combined_id, info.GetName());
+ compiler->get_decoration(combined.image_id, spv::DecorationBinding);
+ compiler->set_name(combined.combined_id, info.GetName());
}
// Change binding names to be "dawn_binding_<group>_<binding>".
@@ -100,22 +153,23 @@ namespace dawn_native { namespace opengl {
for (uint32_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
const auto& info = bindingInfo[group][binding];
if (info.used) {
- compiler.set_name(info.base_type_id, GetBindingName(group, binding));
- compiler.unset_decoration(info.id, spv::DecorationBinding);
- compiler.unset_decoration(info.id, spv::DecorationDescriptorSet);
+ compiler->set_name(info.base_type_id, GetBindingName(group, binding));
+ compiler->unset_decoration(info.id, spv::DecorationBinding);
+ compiler->unset_decoration(info.id, spv::DecorationDescriptorSet);
}
}
}
- mGlslSource = compiler.compile();
- }
-
- const char* ShaderModule::GetSource() const {
- return mGlslSource.c_str();
- }
-
- const ShaderModule::CombinedSamplerInfo& ShaderModule::GetCombinedSamplerInfo() const {
- return mCombinedInfo;
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ shaderc_spvc::CompilationResult result;
+ shaderc_spvc_status status = mSpvcContext.CompileShader(&result);
+ if (status != shaderc_spvc_status_success)
+ return DAWN_VALIDATION_ERROR("Unable to compile shader using spvc");
+ mGlslSource = result.GetStringOutput();
+ } else {
+ mGlslSource = compiler->compile();
+ }
+ return {};
}
}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h
index f7233003b9e..5764c583d61 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/ShaderModuleGL.h
@@ -40,7 +40,8 @@ namespace dawn_native { namespace opengl {
class ShaderModule : public ShaderModuleBase {
public:
- ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+ static ResultOrError<ShaderModule*> Create(Device* device,
+ const ShaderModuleDescriptor* descriptor);
using CombinedSamplerInfo = std::vector<CombinedSampler>;
@@ -48,6 +49,9 @@ namespace dawn_native { namespace opengl {
const CombinedSamplerInfo& GetCombinedSamplerInfo() const;
private:
+ ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+ MaybeError Initialize(const ShaderModuleDescriptor* descriptor);
+
CombinedSamplerInfo mCombinedInfo;
std::string mGlslSource;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp
index bbd707464ed..ea72bca1ac9 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/SwapChainGL.cpp
@@ -36,7 +36,7 @@ namespace dawn_native { namespace opengl {
DawnSwapChainNextTexture next = {};
DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
if (error) {
- GetDevice()->HandleError(dawn::ErrorType::Unknown, error);
+ GetDevice()->HandleError(wgpu::ErrorType::Unknown, error);
return nullptr;
}
GLuint nativeTexture = next.texture.u32;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
index 4b4fdde1bcf..7647d21c348 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
@@ -27,7 +27,7 @@ namespace dawn_native { namespace opengl {
GLenum TargetForTexture(const TextureDescriptor* descriptor) {
switch (descriptor->dimension) {
- case dawn::TextureDimension::e2D:
+ case wgpu::TextureDimension::e2D:
if (descriptor->arrayLayerCount > 1) {
ASSERT(descriptor->sampleCount == 1);
return GL_TEXTURE_2D_ARRAY;
@@ -45,17 +45,17 @@ namespace dawn_native { namespace opengl {
}
}
- GLenum TargetForTextureViewDimension(dawn::TextureViewDimension dimension,
+ GLenum TargetForTextureViewDimension(wgpu::TextureViewDimension dimension,
uint32_t sampleCount) {
switch (dimension) {
- case dawn::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2D:
return (sampleCount > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
- case dawn::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::e2DArray:
ASSERT(sampleCount == 1);
return GL_TEXTURE_2D_ARRAY;
- case dawn::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::Cube:
return GL_TEXTURE_CUBE_MAP;
- case dawn::TextureViewDimension::CubeArray:
+ case wgpu::TextureViewDimension::CubeArray:
return GL_TEXTURE_CUBE_MAP_ARRAY;
default:
UNREACHABLE();
@@ -69,9 +69,9 @@ namespace dawn_native { namespace opengl {
return handle;
}
- bool UsageNeedsTextureView(dawn::TextureUsage usage) {
- constexpr dawn::TextureUsage kUsageNeedingTextureView =
- dawn::TextureUsage::Storage | dawn::TextureUsage::Sampled;
+ bool UsageNeedsTextureView(wgpu::TextureUsage usage) {
+ constexpr wgpu::TextureUsage kUsageNeedingTextureView =
+ wgpu::TextureUsage::Storage | wgpu::TextureUsage::Sampled;
return usage & kUsageNeedingTextureView;
}
@@ -90,8 +90,8 @@ namespace dawn_native { namespace opengl {
}
switch (textureViewDescriptor->dimension) {
- case dawn::TextureViewDimension::Cube:
- case dawn::TextureViewDimension::CubeArray:
+ case wgpu::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::CubeArray:
return true;
default:
break;
@@ -122,7 +122,7 @@ namespace dawn_native { namespace opengl {
// GL_TRUE, so the storage of the texture must be allocated with glTexStorage*D.
// https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glTextureView.xhtml
switch (GetDimension()) {
- case dawn::TextureDimension::e2D:
+ case wgpu::TextureDimension::e2D:
if (arrayLayers > 1) {
ASSERT(!IsMultisampledTexture());
gl.TexStorage3D(mTarget, levels, glFormat.internalFormat, width, height,
@@ -163,8 +163,10 @@ namespace dawn_native { namespace opengl {
}
void Texture::DestroyImpl() {
- ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
- mHandle = 0;
+ if (GetTextureState() == TextureState::OwnedInternal) {
+ ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
+ mHandle = 0;
+ }
}
GLuint Texture::GetHandle() const {
@@ -252,7 +254,7 @@ namespace dawn_native { namespace opengl {
return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
}
descriptor.nextInChain = nullptr;
- descriptor.usage = dawn::BufferUsage::CopySrc | dawn::BufferUsage::MapWrite;
+ descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite;
// TODO(natlee@microsoft.com): use Dynamic Uplaoder here for temp buffer
Ref<Buffer> srcBuffer = ToBackend(device->CreateBuffer(&descriptor));
// Call release here to prevent memory leak since CreateBuffer will up the ref count to
@@ -278,7 +280,7 @@ namespace dawn_native { namespace opengl {
Extent3D size = GetMipLevelPhysicalSize(level);
switch (GetDimension()) {
- case dawn::TextureDimension::e2D:
+ case wgpu::TextureDimension::e2D:
// TODO(natlee@microsoft.com): This will break when layerCount is greater
// than 1, because the buffer is only sized for one layer.
ASSERT(layerCount == 1);
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.cpp
index 00a4fca9617..3905b269f75 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.cpp
@@ -18,32 +18,32 @@
namespace dawn_native { namespace opengl {
- GLuint ToOpenGLCompareFunction(dawn::CompareFunction compareFunction) {
+ GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction) {
switch (compareFunction) {
- case dawn::CompareFunction::Never:
+ case wgpu::CompareFunction::Never:
return GL_NEVER;
- case dawn::CompareFunction::Less:
+ case wgpu::CompareFunction::Less:
return GL_LESS;
- case dawn::CompareFunction::LessEqual:
+ case wgpu::CompareFunction::LessEqual:
return GL_LEQUAL;
- case dawn::CompareFunction::Greater:
+ case wgpu::CompareFunction::Greater:
return GL_GREATER;
- case dawn::CompareFunction::GreaterEqual:
+ case wgpu::CompareFunction::GreaterEqual:
return GL_GEQUAL;
- case dawn::CompareFunction::NotEqual:
+ case wgpu::CompareFunction::NotEqual:
return GL_NOTEQUAL;
- case dawn::CompareFunction::Equal:
+ case wgpu::CompareFunction::Equal:
return GL_EQUAL;
- case dawn::CompareFunction::Always:
+ case wgpu::CompareFunction::Always:
return GL_ALWAYS;
default:
UNREACHABLE();
}
}
- GLint GetStencilMaskFromStencilFormat(dawn::TextureFormat depthStencilFormat) {
+ GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat) {
switch (depthStencilFormat) {
- case dawn::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
return 0xFF;
default:
UNREACHABLE();
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.h
index 5c8f8ed45ed..2f87b378132 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/UtilsGL.h
@@ -20,8 +20,8 @@
namespace dawn_native { namespace opengl {
- GLuint ToOpenGLCompareFunction(dawn::CompareFunction compareFunction);
- GLint GetStencilMaskFromStencilFormat(dawn::TextureFormat depthStencilFormat);
+ GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction);
+ GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat);
}} // namespace dawn_native::opengl
#endif // DAWNNATIVE_OPENGL_UTILSGL_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
index 398569d9baf..06a2c3417e2 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/vulkan/BackendVk.h"
+#include "common/SystemUtils.h"
#include "dawn_native/Instance.h"
#include "dawn_native/VulkanBackend.h"
#include "dawn_native/vulkan/AdapterVk.h"
@@ -21,11 +22,15 @@
#include <iostream>
-#if DAWN_PLATFORM_LINUX
+#if defined(DAWN_PLATFORM_LINUX)
+# if defined(DAWN_PLATFORM_ANDROID)
+const char kVulkanLibName[] = "libvulkan.so";
+# else
const char kVulkanLibName[] = "libvulkan.so.1";
-#elif DAWN_PLATFORM_WINDOWS
+# endif
+#elif defined(DAWN_PLATFORM_WINDOWS)
const char kVulkanLibName[] = "vulkan-1.dll";
-#elif DAWN_PLATFORM_FUCHSIA
+#elif defined(DAWN_PLATFORM_FUCHSIA)
const char kVulkanLibName[] = "libvulkan.so";
#else
# error "Unimplemented Vulkan backend platform"
@@ -62,6 +67,22 @@ namespace dawn_native { namespace vulkan {
}
MaybeError Backend::Initialize() {
+#if defined(DAWN_ENABLE_VULKAN_VALIDATION_LAYERS)
+ if (GetInstance()->IsBackendValidationEnabled()) {
+ std::string vkDataDir = GetExecutableDirectory() + DAWN_VK_DATA_DIR;
+ if (!SetEnvironmentVar("VK_LAYER_PATH", vkDataDir.c_str())) {
+ return DAWN_DEVICE_LOST_ERROR("Couldn't set VK_LAYER_PATH");
+ }
+ }
+#endif
+#if defined(DAWN_SWIFTSHADER_VK_ICD_JSON)
+ std::string fullSwiftshaderICDPath =
+ GetExecutableDirectory() + DAWN_SWIFTSHADER_VK_ICD_JSON;
+ if (!SetEnvironmentVar("VK_ICD_FILENAMES", fullSwiftshaderICDPath.c_str())) {
+ return DAWN_DEVICE_LOST_ERROR("Couldn't set VK_ICD_FILENAMES");
+ }
+#endif
+
if (!mVulkanLib.Open(kVulkanLibName)) {
return DAWN_DEVICE_LOST_ERROR(std::string("Couldn't open ") + kVulkanLibName);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
index 336d9a646a5..8408698e167 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
@@ -15,23 +15,27 @@
#include "dawn_native/vulkan/BindGroupLayoutVk.h"
#include "common/BitSetIterator.h"
+#include "dawn_native/vulkan/DescriptorSetService.h"
#include "dawn_native/vulkan/DeviceVk.h"
+#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/VulkanError.h"
+#include <map>
+
namespace dawn_native { namespace vulkan {
namespace {
- VkShaderStageFlags VulkanShaderStageFlags(dawn::ShaderStage stages) {
+ VkShaderStageFlags VulkanShaderStageFlags(wgpu::ShaderStage stages) {
VkShaderStageFlags flags = 0;
- if (stages & dawn::ShaderStage::Vertex) {
+ if (stages & wgpu::ShaderStage::Vertex) {
flags |= VK_SHADER_STAGE_VERTEX_BIT;
}
- if (stages & dawn::ShaderStage::Fragment) {
+ if (stages & wgpu::ShaderStage::Fragment) {
flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
}
- if (stages & dawn::ShaderStage::Compute) {
+ if (stages & wgpu::ShaderStage::Compute) {
flags |= VK_SHADER_STAGE_COMPUTE_BIT;
}
@@ -40,18 +44,18 @@ namespace dawn_native { namespace vulkan {
} // anonymous namespace
- VkDescriptorType VulkanDescriptorType(dawn::BindingType type, bool isDynamic) {
+ VkDescriptorType VulkanDescriptorType(wgpu::BindingType type, bool isDynamic) {
switch (type) {
- case dawn::BindingType::UniformBuffer:
+ case wgpu::BindingType::UniformBuffer:
if (isDynamic) {
return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
}
return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
- case dawn::BindingType::Sampler:
+ case wgpu::BindingType::Sampler:
return VK_DESCRIPTOR_TYPE_SAMPLER;
- case dawn::BindingType::SampledTexture:
+ case wgpu::BindingType::SampledTexture:
return VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
- case dawn::BindingType::StorageBuffer:
+ case wgpu::BindingType::StorageBuffer:
if (isDynamic) {
return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
}
@@ -72,7 +76,7 @@ namespace dawn_native { namespace vulkan {
}
MaybeError BindGroupLayout::Initialize() {
- const auto& info = GetBindingInfo();
+ const LayoutBindingInfo& info = GetBindingInfo();
// Compute the bindings that will be chained in the DescriptorSetLayout create info. We add
// one entry per binding set. This might be optimized by computing continuous ranges of
@@ -80,13 +84,13 @@ namespace dawn_native { namespace vulkan {
uint32_t numBindings = 0;
std::array<VkDescriptorSetLayoutBinding, kMaxBindingsPerGroup> bindings;
for (uint32_t bindingIndex : IterateBitSet(info.mask)) {
- auto& binding = bindings[numBindings];
- binding.binding = bindingIndex;
- binding.descriptorType =
+ VkDescriptorSetLayoutBinding* binding = &bindings[numBindings];
+ binding->binding = bindingIndex;
+ binding->descriptorType =
VulkanDescriptorType(info.types[bindingIndex], info.hasDynamicOffset[bindingIndex]);
- binding.descriptorCount = 1;
- binding.stageFlags = VulkanShaderStageFlags(info.visibilities[bindingIndex]);
- binding.pImmutableSamplers = nullptr;
+ binding->descriptorCount = 1;
+ binding->stageFlags = VulkanShaderStageFlags(info.visibilities[bindingIndex]);
+ binding->pImmutableSamplers = nullptr;
numBindings++;
}
@@ -99,73 +103,113 @@ namespace dawn_native { namespace vulkan {
createInfo.pBindings = bindings.data();
Device* device = ToBackend(GetDevice());
- return CheckVkSuccess(device->fn.CreateDescriptorSetLayout(device->GetVkDevice(),
- &createInfo, nullptr, &mHandle),
- "CreateDescriptorSetLayout");
+ DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorSetLayout(
+ device->GetVkDevice(), &createInfo, nullptr, &mHandle),
+ "CreateDescriptorSetLayout"));
+
+ // Compute the size of descriptor pools used for this layout.
+ std::map<VkDescriptorType, uint32_t> descriptorCountPerType;
+
+ for (uint32_t bindingIndex : IterateBitSet(info.mask)) {
+ VkDescriptorType vulkanType =
+ VulkanDescriptorType(info.types[bindingIndex], info.hasDynamicOffset[bindingIndex]);
+
+ // map::operator[] will return 0 if the key doesn't exist.
+ descriptorCountPerType[vulkanType]++;
+ }
+
+ mPoolSizes.reserve(descriptorCountPerType.size());
+ for (const auto& it : descriptorCountPerType) {
+ mPoolSizes.push_back(VkDescriptorPoolSize{it.first, it.second});
+ }
+
+ return {};
}
BindGroupLayout::~BindGroupLayout() {
+ Device* device = ToBackend(GetDevice());
+
// DescriptorSetLayout aren't used by execution on the GPU and can be deleted at any time,
// so we destroy mHandle immediately instead of using the FencedDeleter
if (mHandle != VK_NULL_HANDLE) {
- Device* device = ToBackend(GetDevice());
device->fn.DestroyDescriptorSetLayout(device->GetVkDevice(), mHandle, nullptr);
mHandle = VK_NULL_HANDLE;
}
+
+ FencedDeleter* deleter = device->GetFencedDeleter();
+ for (const SingleDescriptorSetAllocation& allocation : mAllocations) {
+ deleter->DeleteWhenUnused(allocation.pool);
+ }
+ mAllocations.clear();
}
VkDescriptorSetLayout BindGroupLayout::GetHandle() const {
return mHandle;
}
- BindGroupLayout::PoolSizeSpec BindGroupLayout::ComputePoolSizes(uint32_t* numPoolSizes) const {
- uint32_t numSizes = 0;
- PoolSizeSpec result{};
-
- // Defines an array and indices into it that will contain for each sampler type at which
- // position it is in the PoolSizeSpec, or -1 if it isn't present yet.
- enum DescriptorType {
- UNIFORM_BUFFER,
- SAMPLER,
- SAMPLED_IMAGE,
- STORAGE_BUFFER,
- MAX_TYPE,
- };
- static_assert(MAX_TYPE == kMaxPoolSizesNeeded, "");
- auto ToDescriptorType = [](dawn::BindingType type) -> DescriptorType {
- switch (type) {
- case dawn::BindingType::UniformBuffer:
- return UNIFORM_BUFFER;
- case dawn::BindingType::Sampler:
- return SAMPLER;
- case dawn::BindingType::SampledTexture:
- return SAMPLED_IMAGE;
- case dawn::BindingType::StorageBuffer:
- return STORAGE_BUFFER;
- default:
- UNREACHABLE();
- }
- };
+ ResultOrError<DescriptorSetAllocation> BindGroupLayout::AllocateOneSet() {
+ Device* device = ToBackend(GetDevice());
- std::array<int, MAX_TYPE> descriptorTypeIndex;
- descriptorTypeIndex.fill(-1);
+ // Reuse a previous allocation if available.
+ if (!mAvailableAllocations.empty()) {
+ size_t index = mAvailableAllocations.back();
+ mAvailableAllocations.pop_back();
+ return {{index, mAllocations[index].set}};
+ }
- const auto& info = GetBindingInfo();
- for (uint32_t bindingIndex : IterateBitSet(info.mask)) {
- DescriptorType type = ToDescriptorType(info.types[bindingIndex]);
-
- if (descriptorTypeIndex[type] == -1) {
- descriptorTypeIndex[type] = numSizes;
- result[numSizes].type = VulkanDescriptorType(info.types[bindingIndex],
- info.hasDynamicOffset[bindingIndex]);
- result[numSizes].descriptorCount = 1;
- numSizes++;
- } else {
- result[descriptorTypeIndex[type]].descriptorCount++;
- }
+ // Create a pool to hold our descriptor set.
+ // TODO(cwallez@chromium.org): This horribly inefficient, have more than one descriptor
+ // set per pool.
+ VkDescriptorPoolCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.maxSets = 1;
+ createInfo.poolSizeCount = static_cast<uint32_t>(mPoolSizes.size());
+ createInfo.pPoolSizes = mPoolSizes.data();
+
+ VkDescriptorPool descriptorPool;
+ DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorPool(device->GetVkDevice(), &createInfo,
+ nullptr, &descriptorPool),
+ "CreateDescriptorPool"));
+
+ // Allocate our single set.
+ VkDescriptorSetAllocateInfo allocateInfo;
+ allocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ allocateInfo.pNext = nullptr;
+ allocateInfo.descriptorPool = descriptorPool;
+ allocateInfo.descriptorSetCount = 1;
+ allocateInfo.pSetLayouts = &mHandle;
+
+ VkDescriptorSet descriptorSet;
+ MaybeError result = CheckVkSuccess(
+ device->fn.AllocateDescriptorSets(device->GetVkDevice(), &allocateInfo, &descriptorSet),
+ "AllocateDescriptorSets");
+
+ if (result.IsError()) {
+ // On an error we can destroy the pool immediately because no command references it.
+ device->fn.DestroyDescriptorPool(device->GetVkDevice(), descriptorPool, nullptr);
+ return result.AcquireError();
}
- *numPoolSizes = numSizes;
- return result;
+ mAllocations.push_back({descriptorPool, descriptorSet});
+ return {{mAllocations.size() - 1, descriptorSet}};
}
+
+ void BindGroupLayout::Deallocate(DescriptorSetAllocation* allocation) {
+ // We can't reuse the descriptor set right away because the Vulkan spec says in the
+ // documentation for vkCmdBindDescriptorSets that the set may be consumed any time between
+ // host execution of the command and the end of the draw/dispatch.
+ ToBackend(GetDevice())
+ ->GetDescriptorSetService()
+ ->AddDeferredDeallocation(this, allocation->index);
+
+ // Clear the content of allocation so that use after frees are more visible.
+ *allocation = {};
+ }
+
+ void BindGroupLayout::FinishDeallocation(size_t index) {
+ mAvailableAllocations.push_back(index);
+ }
+
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
index 12ba2b61cc8..947f29d6560 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.h
@@ -19,12 +19,32 @@
#include "common/vulkan_platform.h"
+#include <vector>
+
namespace dawn_native { namespace vulkan {
class Device;
- VkDescriptorType VulkanDescriptorType(dawn::BindingType type, bool isDynamic);
+ VkDescriptorType VulkanDescriptorType(wgpu::BindingType type, bool isDynamic);
+
+ // Contains a descriptor set along with data necessary to track its allocation.
+ struct DescriptorSetAllocation {
+ size_t index = 0;
+ VkDescriptorSet set = VK_NULL_HANDLE;
+ };
+ // In Vulkan descriptor pools have to be sized to an exact number of descriptors. This means
+ // it's hard to have something where we can mix different types of descriptor sets because
+ // we don't know if their vector of number of descriptors will be similar.
+ //
+ // That's why that in addition to containing the VkDescriptorSetLayout to create
+ // VkDescriptorSets for its bindgroups, the layout also acts as an allocator for the descriptor
+ // sets.
+ //
+ // The allocations is done with one pool per descriptor set, which is inefficient, but at least
+ // the pools are reused when no longer used. Minimizing the number of descriptor pool allocation
+ // is important because creating them can incur GPU memory allocation which is usually an
+ // expensive syscall.
class BindGroupLayout : public BindGroupLayoutBase {
public:
static ResultOrError<BindGroupLayout*> Create(Device* device,
@@ -33,14 +53,26 @@ namespace dawn_native { namespace vulkan {
VkDescriptorSetLayout GetHandle() const;
- static constexpr size_t kMaxPoolSizesNeeded = 4;
- using PoolSizeSpec = std::array<VkDescriptorPoolSize, kMaxPoolSizesNeeded>;
- PoolSizeSpec ComputePoolSizes(uint32_t* numPoolSizes) const;
+ ResultOrError<DescriptorSetAllocation> AllocateOneSet();
+ void Deallocate(DescriptorSetAllocation* allocation);
+
+ // Interaction with the DescriptorSetService.
+ void FinishDeallocation(size_t index);
private:
using BindGroupLayoutBase::BindGroupLayoutBase;
MaybeError Initialize();
+ std::vector<VkDescriptorPoolSize> mPoolSizes;
+
+ struct SingleDescriptorSetAllocation {
+ VkDescriptorPool pool = VK_NULL_HANDLE;
+ // Descriptor sets are freed when the pool is destroyed.
+ VkDescriptorSet set = VK_NULL_HANDLE;
+ };
+ std::vector<SingleDescriptorSetAllocation> mAllocations;
+ std::vector<size_t> mAvailableAllocations;
+
VkDescriptorSetLayout mHandle = VK_NULL_HANDLE;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
index aa0f5b5d0d2..5c56030beaf 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
@@ -36,37 +36,7 @@ namespace dawn_native { namespace vulkan {
MaybeError BindGroup::Initialize() {
Device* device = ToBackend(GetDevice());
- // Create a pool to hold our descriptor set.
- // TODO(cwallez@chromium.org): This horribly inefficient, find a way to be better, for
- // example by having one pool per bind group layout instead.
- uint32_t numPoolSizes = 0;
- auto poolSizes = ToBackend(GetLayout())->ComputePoolSizes(&numPoolSizes);
-
- VkDescriptorPoolCreateInfo createInfo;
- createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = 0;
- createInfo.maxSets = 1;
- createInfo.poolSizeCount = numPoolSizes;
- createInfo.pPoolSizes = poolSizes.data();
-
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateDescriptorPool(device->GetVkDevice(), &createInfo, nullptr, &mPool),
- "CreateDescriptorPool"));
-
- // Now do the allocation of one descriptor set, this is very suboptimal too.
- VkDescriptorSetLayout vkLayout = ToBackend(GetLayout())->GetHandle();
-
- VkDescriptorSetAllocateInfo allocateInfo;
- allocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
- allocateInfo.pNext = nullptr;
- allocateInfo.descriptorPool = mPool;
- allocateInfo.descriptorSetCount = 1;
- allocateInfo.pSetLayouts = &vkLayout;
-
- DAWN_TRY(CheckVkSuccess(
- device->fn.AllocateDescriptorSets(device->GetVkDevice(), &allocateInfo, &mHandle),
- "AllocateDescriptorSets"));
+ DAWN_TRY_ASSIGN(mAllocation, ToBackend(GetLayout())->AllocateOneSet());
// Now do a write of a single descriptor set with all possible chained data allocated on the
// stack.
@@ -80,7 +50,7 @@ namespace dawn_native { namespace vulkan {
auto& write = writes[numWrites];
write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write.pNext = nullptr;
- write.dstSet = mHandle;
+ write.dstSet = mAllocation.set;
write.dstBinding = bindingIndex;
write.dstArrayElement = 0;
write.descriptorCount = 1;
@@ -88,8 +58,8 @@ namespace dawn_native { namespace vulkan {
layoutInfo.hasDynamicOffset[bindingIndex]);
switch (layoutInfo.types[bindingIndex]) {
- case dawn::BindingType::UniformBuffer:
- case dawn::BindingType::StorageBuffer: {
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::StorageBuffer: {
BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
writeBufferInfo[numWrites].buffer = ToBackend(binding.buffer)->GetHandle();
@@ -98,17 +68,17 @@ namespace dawn_native { namespace vulkan {
write.pBufferInfo = &writeBufferInfo[numWrites];
} break;
- case dawn::BindingType::Sampler: {
+ case wgpu::BindingType::Sampler: {
Sampler* sampler = ToBackend(GetBindingAsSampler(bindingIndex));
writeImageInfo[numWrites].sampler = sampler->GetHandle();
write.pImageInfo = &writeImageInfo[numWrites];
} break;
- case dawn::BindingType::SampledTexture: {
+ case wgpu::BindingType::SampledTexture: {
TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
writeImageInfo[numWrites].imageView = view->GetHandle();
- // TODO(cwallez@chromium.org): This isn't true in general: if the image can has
+ // TODO(cwallez@chromium.org): This isn't true in general: if the image has
// two read-only usages one of which is Sampled. Works for now though :)
writeImageInfo[numWrites].imageLayout =
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
@@ -123,6 +93,7 @@ namespace dawn_native { namespace vulkan {
numWrites++;
}
+ // TODO(cwallez@chromium.org): Batch these updates
device->fn.UpdateDescriptorSets(device->GetVkDevice(), numWrites, writes.data(), 0,
nullptr);
@@ -130,18 +101,11 @@ namespace dawn_native { namespace vulkan {
}
BindGroup::~BindGroup() {
- // The descriptor set doesn't need to be delete because it's done implicitly when the
- // descriptor pool is destroyed.
- mHandle = VK_NULL_HANDLE;
-
- if (mPool != VK_NULL_HANDLE) {
- ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mPool);
- mPool = VK_NULL_HANDLE;
- }
+ ToBackend(GetLayout())->Deallocate(&mAllocation);
}
VkDescriptorSet BindGroup::GetHandle() const {
- return mHandle;
+ return mAllocation.set;
}
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
index bfa3fead67a..4dd4c218a7c 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.h
@@ -17,7 +17,7 @@
#include "dawn_native/BindGroup.h"
-#include "common/vulkan_platform.h"
+#include "dawn_native/vulkan/BindGroupLayoutVk.h"
namespace dawn_native { namespace vulkan {
@@ -35,8 +35,9 @@ namespace dawn_native { namespace vulkan {
using BindGroupBase::BindGroupBase;
MaybeError Initialize();
- VkDescriptorPool mPool = VK_NULL_HANDLE;
- VkDescriptorSet mHandle = VK_NULL_HANDLE;
+ // The descriptor set in this allocation outlives the BindGroup because it is owned by
+ // the BindGroupLayout which is referenced by the BindGroup.
+ DescriptorSetAllocation mAllocation;
};
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
index 2133457026f..baa71028c22 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
@@ -16,8 +16,8 @@
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/MemoryResourceAllocatorVk.h"
-#include "dawn_native/vulkan/ResourceMemoryVk.h"
+#include "dawn_native/vulkan/ResourceHeapVk.h"
+#include "dawn_native/vulkan/ResourceMemoryAllocatorVk.h"
#include "dawn_native/vulkan/VulkanError.h"
#include <cstring>
@@ -26,86 +26,86 @@ namespace dawn_native { namespace vulkan {
namespace {
- VkBufferUsageFlags VulkanBufferUsage(dawn::BufferUsage usage) {
+ VkBufferUsageFlags VulkanBufferUsage(wgpu::BufferUsage usage) {
VkBufferUsageFlags flags = 0;
- if (usage & dawn::BufferUsage::CopySrc) {
+ if (usage & wgpu::BufferUsage::CopySrc) {
flags |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
}
- if (usage & dawn::BufferUsage::CopyDst) {
+ if (usage & wgpu::BufferUsage::CopyDst) {
flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
}
- if (usage & dawn::BufferUsage::Index) {
+ if (usage & wgpu::BufferUsage::Index) {
flags |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
}
- if (usage & dawn::BufferUsage::Vertex) {
+ if (usage & wgpu::BufferUsage::Vertex) {
flags |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
}
- if (usage & dawn::BufferUsage::Uniform) {
+ if (usage & wgpu::BufferUsage::Uniform) {
flags |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
}
- if (usage & dawn::BufferUsage::Storage) {
+ if (usage & wgpu::BufferUsage::Storage) {
flags |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
}
- if (usage & dawn::BufferUsage::Indirect) {
+ if (usage & wgpu::BufferUsage::Indirect) {
flags |= VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
}
return flags;
}
- VkPipelineStageFlags VulkanPipelineStage(dawn::BufferUsage usage) {
+ VkPipelineStageFlags VulkanPipelineStage(wgpu::BufferUsage usage) {
VkPipelineStageFlags flags = 0;
- if (usage & (dawn::BufferUsage::MapRead | dawn::BufferUsage::MapWrite)) {
+ if (usage & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) {
flags |= VK_PIPELINE_STAGE_HOST_BIT;
}
- if (usage & (dawn::BufferUsage::CopySrc | dawn::BufferUsage::CopyDst)) {
+ if (usage & (wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst)) {
flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
}
- if (usage & (dawn::BufferUsage::Index | dawn::BufferUsage::Vertex)) {
+ if (usage & (wgpu::BufferUsage::Index | wgpu::BufferUsage::Vertex)) {
flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
}
- if (usage & (dawn::BufferUsage::Uniform | dawn::BufferUsage::Storage)) {
+ if (usage & (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage)) {
flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
}
- if (usage & dawn::BufferUsage::Indirect) {
+ if (usage & wgpu::BufferUsage::Indirect) {
flags |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
}
return flags;
}
- VkAccessFlags VulkanAccessFlags(dawn::BufferUsage usage) {
+ VkAccessFlags VulkanAccessFlags(wgpu::BufferUsage usage) {
VkAccessFlags flags = 0;
- if (usage & dawn::BufferUsage::MapRead) {
+ if (usage & wgpu::BufferUsage::MapRead) {
flags |= VK_ACCESS_HOST_READ_BIT;
}
- if (usage & dawn::BufferUsage::MapWrite) {
+ if (usage & wgpu::BufferUsage::MapWrite) {
flags |= VK_ACCESS_HOST_WRITE_BIT;
}
- if (usage & dawn::BufferUsage::CopySrc) {
+ if (usage & wgpu::BufferUsage::CopySrc) {
flags |= VK_ACCESS_TRANSFER_READ_BIT;
}
- if (usage & dawn::BufferUsage::CopyDst) {
+ if (usage & wgpu::BufferUsage::CopyDst) {
flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
}
- if (usage & dawn::BufferUsage::Index) {
+ if (usage & wgpu::BufferUsage::Index) {
flags |= VK_ACCESS_INDEX_READ_BIT;
}
- if (usage & dawn::BufferUsage::Vertex) {
+ if (usage & wgpu::BufferUsage::Vertex) {
flags |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
}
- if (usage & dawn::BufferUsage::Uniform) {
+ if (usage & wgpu::BufferUsage::Uniform) {
flags |= VK_ACCESS_UNIFORM_READ_BIT;
}
- if (usage & dawn::BufferUsage::Storage) {
+ if (usage & wgpu::BufferUsage::Storage) {
flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (usage & dawn::BufferUsage::Indirect) {
+ if (usage & wgpu::BufferUsage::Indirect) {
flags |= VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
}
@@ -129,7 +129,7 @@ namespace dawn_native { namespace vulkan {
createInfo.size = GetSize();
// Add CopyDst for non-mappable buffer initialization in CreateBufferMapped
// and robust resource initialization.
- createInfo.usage = VulkanBufferUsage(GetUsage() | dawn::BufferUsage::CopyDst);
+ createInfo.usage = VulkanBufferUsage(GetUsage() | wgpu::BufferUsage::CopyDst);
createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
createInfo.queueFamilyIndexCount = 0;
createInfo.pQueueFamilyIndices = 0;
@@ -143,7 +143,7 @@ namespace dawn_native { namespace vulkan {
device->fn.GetBufferMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
bool requestMappable =
- (GetUsage() & (dawn::BufferUsage::MapRead | dawn::BufferUsage::MapWrite)) != 0;
+ (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
DAWN_TRY_ASSIGN(mMemoryAllocation, device->AllocateMemory(requirements, requestMappable));
DAWN_TRY(CheckVkSuccess(
@@ -160,11 +160,11 @@ namespace dawn_native { namespace vulkan {
}
void Buffer::OnMapReadCommandSerialFinished(uint32_t mapSerial, const void* data) {
- CallMapReadCallback(mapSerial, DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS, data, GetSize());
+ CallMapReadCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
}
void Buffer::OnMapWriteCommandSerialFinished(uint32_t mapSerial, void* data) {
- CallMapWriteCallback(mapSerial, DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS, data, GetSize());
+ CallMapWriteCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
}
VkBuffer Buffer::GetHandle() const {
@@ -172,7 +172,7 @@ namespace dawn_native { namespace vulkan {
}
void Buffer::TransitionUsageNow(CommandRecordingContext* recordingContext,
- dawn::BufferUsage usage) {
+ wgpu::BufferUsage usage) {
bool lastIncludesTarget = (mLastUsage & usage) == usage;
bool lastReadOnly = (mLastUsage & kReadOnlyBufferUsages) == mLastUsage;
@@ -182,7 +182,7 @@ namespace dawn_native { namespace vulkan {
}
// Special-case for the initial transition: Vulkan doesn't allow access flags to be 0.
- if (mLastUsage == dawn::BufferUsage::None) {
+ if (mLastUsage == wgpu::BufferUsage::None) {
mLastUsage = usage;
return;
}
@@ -222,7 +222,7 @@ namespace dawn_native { namespace vulkan {
Device* device = ToBackend(GetDevice());
CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
- TransitionUsageNow(recordingContext, dawn::BufferUsage::MapRead);
+ TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapRead);
uint8_t* memory = mMemoryAllocation.GetMappedPointer();
ASSERT(memory != nullptr);
@@ -236,7 +236,7 @@ namespace dawn_native { namespace vulkan {
Device* device = ToBackend(GetDevice());
CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
- TransitionUsageNow(recordingContext, dawn::BufferUsage::MapWrite);
+ TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapWrite);
uint8_t* memory = mMemoryAllocation.GetMappedPointer();
ASSERT(memory != nullptr);
@@ -251,7 +251,7 @@ namespace dawn_native { namespace vulkan {
}
void Buffer::DestroyImpl() {
- ToBackend(GetDevice())->DeallocateMemory(mMemoryAllocation);
+ ToBackend(GetDevice())->DeallocateMemory(&mMemoryAllocation);
if (mHandle != VK_NULL_HANDLE) {
ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
index 210fadccf16..34a94a43ab7 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
@@ -20,7 +20,6 @@
#include "common/SerialQueue.h"
#include "common/vulkan_platform.h"
#include "dawn_native/ResourceMemoryAllocation.h"
-#include "dawn_native/vulkan/MemoryAllocator.h"
namespace dawn_native { namespace vulkan {
@@ -40,7 +39,7 @@ namespace dawn_native { namespace vulkan {
// Transitions the buffer to be used as `usage`, recording any necessary barrier in
// `commands`.
// TODO(cwallez@chromium.org): coalesce barriers and do them early when possible.
- void TransitionUsageNow(CommandRecordingContext* recordingContext, dawn::BufferUsage usage);
+ void TransitionUsageNow(CommandRecordingContext* recordingContext, wgpu::BufferUsage usage);
private:
using BufferBase::BufferBase;
@@ -58,7 +57,7 @@ namespace dawn_native { namespace vulkan {
VkBuffer mHandle = VK_NULL_HANDLE;
ResourceMemoryAllocation mMemoryAllocation;
- dawn::BufferUsage mLastUsage = dawn::BufferUsage::None;
+ wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
};
class MapRequestTracker {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
index 0e2d00349fc..d88512233d3 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
@@ -14,7 +14,7 @@
#include "dawn_native/vulkan/CommandBufferVk.h"
-#include "dawn_native/BindGroupTracker.h"
+#include "dawn_native/BindGroupAndStorageBarrierTracker.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/Commands.h"
#include "dawn_native/RenderBundle.h"
@@ -35,11 +35,11 @@ namespace dawn_native { namespace vulkan {
namespace {
- VkIndexType VulkanIndexType(dawn::IndexFormat format) {
+ VkIndexType VulkanIndexType(wgpu::IndexFormat format) {
switch (format) {
- case dawn::IndexFormat::Uint16:
+ case wgpu::IndexFormat::Uint16:
return VK_INDEX_TYPE_UINT16;
- case dawn::IndexFormat::Uint32:
+ case wgpu::IndexFormat::Uint32:
return VK_INDEX_TYPE_UINT32;
default:
UNREACHABLE();
@@ -91,16 +91,77 @@ namespace dawn_native { namespace vulkan {
return region;
}
- class DescriptorSetTracker : public BindGroupTrackerBase<VkDescriptorSet, true, uint32_t> {
+ void ApplyDescriptorSets(Device* device,
+ VkCommandBuffer commands,
+ VkPipelineBindPoint bindPoint,
+ VkPipelineLayout pipelineLayout,
+ const std::bitset<kMaxBindGroups>& bindGroupsToApply,
+ const std::array<BindGroupBase*, kMaxBindGroups>& bindGroups,
+ const std::array<uint32_t, kMaxBindGroups>& dynamicOffsetCounts,
+ const std::array<std::array<uint32_t, kMaxBindingsPerGroup>,
+ kMaxBindGroups>& dynamicOffsets) {
+ for (uint32_t dirtyIndex : IterateBitSet(bindGroupsToApply)) {
+ VkDescriptorSet set = ToBackend(bindGroups[dirtyIndex])->GetHandle();
+ const uint32_t* dynamicOffset = dynamicOffsetCounts[dirtyIndex] > 0
+ ? dynamicOffsets[dirtyIndex].data()
+ : nullptr;
+ device->fn.CmdBindDescriptorSets(commands, bindPoint, pipelineLayout, dirtyIndex, 1,
+ &set, dynamicOffsetCounts[dirtyIndex],
+ dynamicOffset);
+ }
+ }
+
+ class RenderDescriptorSetTracker : public BindGroupTrackerBase<true, uint32_t> {
+ public:
+ RenderDescriptorSetTracker() = default;
+
+ void Apply(Device* device,
+ CommandRecordingContext* recordingContext,
+ VkPipelineBindPoint bindPoint) {
+ ApplyDescriptorSets(device, recordingContext->commandBuffer, bindPoint,
+ ToBackend(mPipelineLayout)->GetHandle(),
+ mDirtyBindGroupsObjectChangedOrIsDynamic, mBindGroups,
+ mDynamicOffsetCounts, mDynamicOffsets);
+ DidApply();
+ }
+ };
+
+ class ComputeDescriptorSetTracker
+ : public BindGroupAndStorageBarrierTrackerBase<true, uint32_t> {
public:
- void Apply(Device* device, VkCommandBuffer commands, VkPipelineBindPoint bindPoint) {
- for (uint32_t dirtyIndex :
- IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
- device->fn.CmdBindDescriptorSets(
- commands, bindPoint, ToBackend(mPipelineLayout)->GetHandle(), dirtyIndex, 1,
- &mBindGroups[dirtyIndex], mDynamicOffsetCounts[dirtyIndex],
- mDynamicOffsetCounts[dirtyIndex] > 0 ? mDynamicOffsets[dirtyIndex].data()
- : nullptr);
+ ComputeDescriptorSetTracker() = default;
+
+ void Apply(Device* device,
+ CommandRecordingContext* recordingContext,
+ VkPipelineBindPoint bindPoint) {
+ ApplyDescriptorSets(device, recordingContext->commandBuffer, bindPoint,
+ ToBackend(mPipelineLayout)->GetHandle(),
+ mDirtyBindGroupsObjectChangedOrIsDynamic, mBindGroups,
+ mDynamicOffsetCounts, mDynamicOffsets);
+
+ for (uint32_t index : IterateBitSet(mBindGroupLayoutsMask)) {
+ for (uint32_t binding : IterateBitSet(mBuffersNeedingBarrier[index])) {
+ switch (mBindingTypes[index][binding]) {
+ case wgpu::BindingType::StorageBuffer:
+ ToBackend(mBuffers[index][binding])
+ ->TransitionUsageNow(recordingContext,
+ wgpu::BufferUsage::Storage);
+ break;
+
+ case wgpu::BindingType::StorageTexture:
+ // Not implemented.
+
+ case wgpu::BindingType::UniformBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::SampledTexture:
+ // Don't require barriers.
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
}
DidApply();
}
@@ -122,13 +183,13 @@ namespace dawn_native { namespace vulkan {
TextureView* view = ToBackend(attachmentInfo.view.Get());
bool hasResolveTarget = attachmentInfo.resolveTarget.Get() != nullptr;
- dawn::LoadOp loadOp = attachmentInfo.loadOp;
+ wgpu::LoadOp loadOp = attachmentInfo.loadOp;
ASSERT(view->GetLayerCount() == 1);
ASSERT(view->GetLevelCount() == 1);
- if (loadOp == dawn::LoadOp::Load &&
+ if (loadOp == wgpu::LoadOp::Load &&
!view->GetTexture()->IsSubresourceContentInitialized(
view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1)) {
- loadOp = dawn::LoadOp::Clear;
+ loadOp = wgpu::LoadOp::Clear;
}
if (hasResolveTarget) {
@@ -143,12 +204,12 @@ namespace dawn_native { namespace vulkan {
}
switch (attachmentInfo.storeOp) {
- case dawn::StoreOp::Store: {
+ case wgpu::StoreOp::Store: {
view->GetTexture()->SetIsSubresourceContentInitialized(
true, view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
} break;
- case dawn::StoreOp::Clear: {
+ case wgpu::StoreOp::Clear: {
view->GetTexture()->SetIsSubresourceContentInitialized(
false, view->GetBaseMipLevel(), 1, view->GetBaseArrayLayer(), 1);
} break;
@@ -170,26 +231,26 @@ namespace dawn_native { namespace vulkan {
view->GetBaseMipLevel(), view->GetLevelCount(),
view->GetBaseArrayLayer(), view->GetLayerCount())) {
if (view->GetTexture()->GetFormat().HasDepth() &&
- attachmentInfo.depthLoadOp == dawn::LoadOp::Load) {
+ attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
attachmentInfo.clearDepth = 0.0f;
- attachmentInfo.depthLoadOp = dawn::LoadOp::Clear;
+ attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
}
if (view->GetTexture()->GetFormat().HasStencil() &&
- attachmentInfo.stencilLoadOp == dawn::LoadOp::Load) {
+ attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
attachmentInfo.clearStencil = 0u;
- attachmentInfo.stencilLoadOp = dawn::LoadOp::Clear;
+ attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
}
}
query.SetDepthStencil(view->GetTexture()->GetFormat().format,
attachmentInfo.depthLoadOp, attachmentInfo.stencilLoadOp);
- if (attachmentInfo.depthStoreOp == dawn::StoreOp::Store &&
- attachmentInfo.stencilStoreOp == dawn::StoreOp::Store) {
+ if (attachmentInfo.depthStoreOp == wgpu::StoreOp::Store &&
+ attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store) {
view->GetTexture()->SetIsSubresourceContentInitialized(
true, view->GetBaseMipLevel(), view->GetLevelCount(),
view->GetBaseArrayLayer(), view->GetLayerCount());
- } else if (attachmentInfo.depthStoreOp == dawn::StoreOp::Clear &&
- attachmentInfo.stencilStoreOp == dawn::StoreOp::Clear) {
+ } else if (attachmentInfo.depthStoreOp == wgpu::StoreOp::Clear &&
+ attachmentInfo.stencilStoreOp == wgpu::StoreOp::Clear) {
view->GetTexture()->SetIsSubresourceContentInitialized(
false, view->GetBaseMipLevel(), view->GetLevelCount(),
view->GetBaseArrayLayer(), view->GetLayerCount());
@@ -290,13 +351,12 @@ namespace dawn_native { namespace vulkan {
} // anonymous namespace
// static
- CommandBuffer* CommandBuffer::Create(CommandEncoderBase* encoder,
+ CommandBuffer* CommandBuffer::Create(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) {
return new CommandBuffer(encoder, descriptor);
}
- CommandBuffer::CommandBuffer(CommandEncoderBase* encoder,
- const CommandBufferDescriptor* descriptor)
+ CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
: CommandBufferBase(encoder, descriptor), mCommands(encoder->AcquireCommands()) {
}
@@ -321,7 +381,7 @@ namespace dawn_native { namespace vulkan {
format.blockByteSize;
BufferDescriptor tempBufferDescriptor;
tempBufferDescriptor.size = tempBufferSize;
- tempBufferDescriptor.usage = dawn::BufferUsage::CopySrc | dawn::BufferUsage::CopyDst;
+ tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
Device* device = ToBackend(GetDevice());
Ref<Buffer> tempBuffer = AcquireRef(ToBackend(device->CreateBuffer(&tempBufferDescriptor)));
@@ -336,7 +396,7 @@ namespace dawn_native { namespace vulkan {
VkImage srcImage = ToBackend(srcCopy.texture)->GetHandle();
VkImage dstImage = ToBackend(dstCopy.texture)->GetHandle();
- tempBuffer->TransitionUsageNow(recordingContext, dawn::BufferUsage::CopyDst);
+ tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
VkBufferImageCopy srcToTempBufferRegion =
ComputeBufferImageCopyRegion(tempBufferCopy, srcCopy, copySize);
@@ -344,7 +404,7 @@ namespace dawn_native { namespace vulkan {
device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
tempBuffer->GetHandle(), 1, &srcToTempBufferRegion);
- tempBuffer->TransitionUsageNow(recordingContext, dawn::BufferUsage::CopySrc);
+ tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
VkBufferImageCopy tempBufferToDstRegion =
ComputeBufferImageCopyRegion(tempBufferCopy, dstCopy, copySize);
@@ -373,7 +433,7 @@ namespace dawn_native { namespace vulkan {
// Clear textures that are not output attachments. Output attachments will be
// cleared in RecordBeginRenderPass by setting the loadop to clear when the
// texture subresource has not been initialized before the render pass.
- if (!(usages.textureUsages[i] & dawn::TextureUsage::OutputAttachment)) {
+ if (!(usages.textureUsages[i] & wgpu::TextureUsage::OutputAttachment)) {
texture->EnsureSubresourceContentInitialized(recordingContext, 0,
texture->GetNumMipLevels(), 0,
texture->GetArrayLayers());
@@ -392,8 +452,8 @@ namespace dawn_native { namespace vulkan {
Buffer* srcBuffer = ToBackend(copy->source.Get());
Buffer* dstBuffer = ToBackend(copy->destination.Get());
- srcBuffer->TransitionUsageNow(recordingContext, dawn::BufferUsage::CopySrc);
- dstBuffer->TransitionUsageNow(recordingContext, dawn::BufferUsage::CopyDst);
+ srcBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+ dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
VkBufferCopy region;
region.srcOffset = copy->sourceOffset;
@@ -426,9 +486,9 @@ namespace dawn_native { namespace vulkan {
subresource.baseArrayLayer, 1);
}
ToBackend(src.buffer)
- ->TransitionUsageNow(recordingContext, dawn::BufferUsage::CopySrc);
+ ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
ToBackend(dst.texture)
- ->TransitionUsageNow(recordingContext, dawn::TextureUsage::CopyDst);
+ ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst);
VkBuffer srcBuffer = ToBackend(src.buffer)->GetHandle();
VkImage dstImage = ToBackend(dst.texture)->GetHandle();
@@ -454,9 +514,9 @@ namespace dawn_native { namespace vulkan {
subresource.baseArrayLayer, 1);
ToBackend(src.texture)
- ->TransitionUsageNow(recordingContext, dawn::TextureUsage::CopySrc);
+ ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc);
ToBackend(dst.buffer)
- ->TransitionUsageNow(recordingContext, dawn::BufferUsage::CopyDst);
+ ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
VkImage srcImage = ToBackend(src.texture)->GetHandle();
VkBuffer dstBuffer = ToBackend(dst.buffer)->GetHandle();
@@ -486,9 +546,9 @@ namespace dawn_native { namespace vulkan {
}
ToBackend(src.texture)
- ->TransitionUsageNow(recordingContext, dawn::TextureUsage::CopySrc);
+ ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc);
ToBackend(dst.texture)
- ->TransitionUsageNow(recordingContext, dawn::TextureUsage::CopyDst);
+ ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst);
// In some situations we cannot do texture-to-texture copies with vkCmdCopyImage
// because as Vulkan SPEC always validates image copies with the virtual size of
@@ -553,7 +613,7 @@ namespace dawn_native { namespace vulkan {
Device* device = ToBackend(GetDevice());
VkCommandBuffer commands = recordingContext->commandBuffer;
- DescriptorSetTracker descriptorSets = {};
+ ComputeDescriptorSetTracker descriptorSets = {};
Command type;
while (mCommands.NextCommandId(&type)) {
@@ -565,7 +625,8 @@ namespace dawn_native { namespace vulkan {
case Command::Dispatch: {
DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
- descriptorSets.Apply(device, commands, VK_PIPELINE_BIND_POINT_COMPUTE);
+
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
device->fn.CmdDispatch(commands, dispatch->x, dispatch->y, dispatch->z);
} break;
@@ -573,7 +634,7 @@ namespace dawn_native { namespace vulkan {
DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
VkBuffer indirectBuffer = ToBackend(dispatch->indirectBuffer)->GetHandle();
- descriptorSets.Apply(device, commands, VK_PIPELINE_BIND_POINT_COMPUTE);
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
device->fn.CmdDispatchIndirect(
commands, indirectBuffer,
static_cast<VkDeviceSize>(dispatch->indirectOffset));
@@ -581,13 +642,14 @@ namespace dawn_native { namespace vulkan {
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
- VkDescriptorSet set = ToBackend(cmd->group.Get())->GetHandle();
- uint64_t* dynamicOffsets = nullptr;
+
+ BindGroup* bindGroup = ToBackend(cmd->group.Get());
+ uint32_t* dynamicOffsets = nullptr;
if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = mCommands.NextData<uint64_t>(cmd->dynamicOffsetCount);
+ dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
}
- descriptorSets.OnSetBindGroup(cmd->index, set, cmd->dynamicOffsetCount,
+ descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
dynamicOffsets);
} break;
@@ -695,7 +757,7 @@ namespace dawn_native { namespace vulkan {
device->fn.CmdSetScissor(commands, 0, 1, &scissorRect);
}
- DescriptorSetTracker descriptorSets = {};
+ RenderDescriptorSetTracker descriptorSets = {};
RenderPipeline* lastPipeline = nullptr;
auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
@@ -703,7 +765,7 @@ namespace dawn_native { namespace vulkan {
case Command::Draw: {
DrawCmd* draw = iter->NextCommand<DrawCmd>();
- descriptorSets.Apply(device, commands, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
device->fn.CmdDraw(commands, draw->vertexCount, draw->instanceCount,
draw->firstVertex, draw->firstInstance);
} break;
@@ -711,7 +773,7 @@ namespace dawn_native { namespace vulkan {
case Command::DrawIndexed: {
DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
- descriptorSets.Apply(device, commands, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
device->fn.CmdDrawIndexed(commands, draw->indexCount, draw->instanceCount,
draw->firstIndex, draw->baseVertex,
draw->firstInstance);
@@ -721,7 +783,7 @@ namespace dawn_native { namespace vulkan {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
VkBuffer indirectBuffer = ToBackend(draw->indirectBuffer)->GetHandle();
- descriptorSets.Apply(device, commands, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
device->fn.CmdDrawIndirect(commands, indirectBuffer,
static_cast<VkDeviceSize>(draw->indirectOffset), 1,
0);
@@ -731,7 +793,7 @@ namespace dawn_native { namespace vulkan {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
VkBuffer indirectBuffer = ToBackend(draw->indirectBuffer)->GetHandle();
- descriptorSets.Apply(device, commands, VK_PIPELINE_BIND_POINT_GRAPHICS);
+ descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
device->fn.CmdDrawIndexedIndirect(
commands, indirectBuffer, static_cast<VkDeviceSize>(draw->indirectOffset),
1, 0);
@@ -786,13 +848,13 @@ namespace dawn_native { namespace vulkan {
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
- VkDescriptorSet set = ToBackend(cmd->group.Get())->GetHandle();
- uint64_t* dynamicOffsets = nullptr;
+ BindGroup* bindGroup = ToBackend(cmd->group.Get());
+ uint32_t* dynamicOffsets = nullptr;
if (cmd->dynamicOffsetCount > 0) {
- dynamicOffsets = iter->NextData<uint64_t>(cmd->dynamicOffsetCount);
+ dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
- descriptorSets.OnSetBindGroup(cmd->index, set, cmd->dynamicOffsetCount,
+ descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
dynamicOffsets);
} break;
@@ -804,7 +866,7 @@ namespace dawn_native { namespace vulkan {
// and rebind if needed on pipeline change
ASSERT(lastPipeline != nullptr);
VkIndexType indexType =
- VulkanIndexType(lastPipeline->GetVertexInputDescriptor()->indexFormat);
+ VulkanIndexType(lastPipeline->GetVertexStateDescriptor()->indexFormat);
device->fn.CmdBindIndexBuffer(
commands, indexBuffer, static_cast<VkDeviceSize>(cmd->offset), indexType);
} break;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
index e1b2e5a660a..1b3994941e5 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
@@ -33,14 +33,14 @@ namespace dawn_native { namespace vulkan {
class CommandBuffer : public CommandBufferBase {
public:
- static CommandBuffer* Create(CommandEncoderBase* encoder,
+ static CommandBuffer* Create(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor);
~CommandBuffer();
MaybeError RecordCommands(CommandRecordingContext* recordingContext);
private:
- CommandBuffer(CommandEncoderBase* encoder, const CommandBufferDescriptor* descriptor);
+ CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
void RecordComputePass(CommandRecordingContext* recordingContext);
MaybeError RecordRenderPass(CommandRecordingContext* recordingContext,
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetService.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetService.cpp
new file mode 100644
index 00000000000..6aa26bbcfac
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetService.cpp
@@ -0,0 +1,41 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/vulkan/DescriptorSetService.h"
+
+#include "dawn_native/vulkan/BindGroupLayoutVk.h"
+#include "dawn_native/vulkan/DeviceVk.h"
+
+namespace dawn_native { namespace vulkan {
+
+ DescriptorSetService::DescriptorSetService(Device* device) : mDevice(device) {
+ }
+
+ DescriptorSetService::~DescriptorSetService() {
+ ASSERT(mDeallocations.Empty());
+ }
+
+ void DescriptorSetService::AddDeferredDeallocation(BindGroupLayout* layout, size_t index) {
+ mDeallocations.Enqueue({layout, index}, mDevice->GetPendingCommandSerial());
+ }
+
+ void DescriptorSetService::Tick(Serial completedSerial) {
+ for (Deallocation& dealloc : mDeallocations.IterateUpTo(completedSerial)) {
+ dealloc.layout->FinishDeallocation(dealloc.index);
+ }
+
+ mDeallocations.ClearUpTo(completedSerial);
+ }
+
+}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetService.h b/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetService.h
new file mode 100644
index 00000000000..c898b051bd2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetService.h
@@ -0,0 +1,53 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_DESCRIPTORSETSERVICE_H_
+#define DAWNNATIVE_VULKAN_DESCRIPTORSETSERVICE_H_
+
+#include "common/SerialQueue.h"
+
+#include "dawn_native/vulkan/BindGroupLayoutVk.h"
+
+#include <vector>
+
+namespace dawn_native { namespace vulkan {
+
+ class BindGroupLayout;
+ class Device;
+
+ // Handles everything related to descriptor sets that isn't tied to a particular
+ // BindGroupLayout.
+ class DescriptorSetService {
+ public:
+ DescriptorSetService(Device* device);
+ ~DescriptorSetService();
+
+ // Will call layout->FinishDeallocation when the serial is passed.
+ void AddDeferredDeallocation(BindGroupLayout* layout, size_t index);
+
+ void Tick(Serial completedSerial);
+
+ private:
+ Device* mDevice;
+
+ struct Deallocation {
+ Ref<BindGroupLayout> layout;
+ size_t index;
+ };
+ SerialQueue<Deallocation> mDeallocations;
+ };
+
+}} // namespace dawn_native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_DESCRIPTORSETSERVICE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
index 15385ab1bad..a5f7788473b 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
@@ -28,11 +28,13 @@
#include "dawn_native/vulkan/BufferVk.h"
#include "dawn_native/vulkan/CommandBufferVk.h"
#include "dawn_native/vulkan/ComputePipelineVk.h"
+#include "dawn_native/vulkan/DescriptorSetService.h"
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/PipelineLayoutVk.h"
#include "dawn_native/vulkan/QueueVk.h"
#include "dawn_native/vulkan/RenderPassCache.h"
#include "dawn_native/vulkan/RenderPipelineVk.h"
+#include "dawn_native/vulkan/ResourceMemoryAllocatorVk.h"
#include "dawn_native/vulkan/SamplerVk.h"
#include "dawn_native/vulkan/ShaderModuleVk.h"
#include "dawn_native/vulkan/StagingBufferVk.h"
@@ -66,11 +68,11 @@ namespace dawn_native { namespace vulkan {
DAWN_TRY(functions->LoadDeviceProcs(mVkDevice, mDeviceInfo));
GatherQueueFromDevice();
+ mDescriptorSetService = std::make_unique<DescriptorSetService>(this);
mDeleter = std::make_unique<FencedDeleter>(this);
mMapRequestTracker = std::make_unique<MapRequestTracker>(this);
- mMemoryAllocator = std::make_unique<MemoryAllocator>(this);
mRenderPassCache = std::make_unique<RenderPassCache>(this);
- mResourceAllocator = std::make_unique<MemoryResourceAllocator>(this);
+ mResourceMemoryAllocator = std::make_unique<ResourceMemoryAllocator>(this);
mExternalMemoryService = std::make_unique<external_memory::Service>(this);
mExternalSemaphoreService = std::make_unique<external_semaphore::Service>(this);
@@ -121,6 +123,9 @@ namespace dawn_native { namespace vulkan {
}
mUnusedCommands.clear();
+ // TODO(jiajie.hu@intel.com): In rare cases, a DAWN_TRY() failure may leave semaphores
+ // untagged for deletion. But for most of the time when everything goes well, these
+ // assertions can be helpful in catching bugs.
ASSERT(mRecordingContext.waitSemaphores.empty());
ASSERT(mRecordingContext.signalSemaphores.empty());
@@ -131,6 +136,7 @@ namespace dawn_native { namespace vulkan {
// Free services explicitly so that they can free Vulkan objects before vkDestroyDevice
mDynamicUploader = nullptr;
+ mDescriptorSetService = nullptr;
// Releasing the uploader enqueues buffers to be released.
// Call Tick() again to clear them before releasing the deleter.
@@ -138,7 +144,6 @@ namespace dawn_native { namespace vulkan {
mDeleter = nullptr;
mMapRequestTracker = nullptr;
- mMemoryAllocator = nullptr;
// The VkRenderPasses in the cache can be destroyed immediately since all commands referring
// to them are guaranteed to be finished executing.
@@ -162,7 +167,7 @@ namespace dawn_native { namespace vulkan {
ResultOrError<BufferBase*> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
return Buffer::Create(this, descriptor);
}
- CommandBufferBase* Device::CreateCommandBuffer(CommandEncoderBase* encoder,
+ CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) {
return CommandBuffer::Create(encoder, descriptor);
}
@@ -217,13 +222,14 @@ namespace dawn_native { namespace vulkan {
CheckPassedFences();
RecycleCompletedCommands();
+ mDescriptorSetService->Tick(mCompletedSerial);
mMapRequestTracker->Tick(mCompletedSerial);
// Uploader should tick before the resource allocator
// as it enqueues resources to be released.
mDynamicUploader->Deallocate(mCompletedSerial);
- mMemoryAllocator->Tick(mCompletedSerial);
+ mResourceMemoryAllocator->Tick(mCompletedSerial);
mDeleter->Tick(mCompletedSerial);
@@ -262,8 +268,8 @@ namespace dawn_native { namespace vulkan {
return mMapRequestTracker.get();
}
- MemoryAllocator* Device::GetMemoryAllocator() const {
- return mMemoryAllocator.get();
+ DescriptorSetService* Device::GetDescriptorSetService() const {
+ return mDescriptorSetService.get();
}
FencedDeleter* Device::GetFencedDeleter() const {
@@ -308,6 +314,15 @@ namespace dawn_native { namespace vulkan {
DAWN_TRY_ASSIGN(fence, GetUnusedFence());
DAWN_TRY(CheckVkSuccess(fn.QueueSubmit(mQueue, 1, &submitInfo, fence), "vkQueueSubmit"));
+ // Enqueue the semaphores before incrementing the serial, so that they can be deleted as
+ // soon as the current submission is finished.
+ for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
+ mDeleter->DeleteWhenUnused(semaphore);
+ }
+ for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
+ mDeleter->DeleteWhenUnused(semaphore);
+ }
+
mLastSubmittedSerial++;
mFencesInFlight.emplace(fence, mLastSubmittedSerial);
@@ -317,14 +332,6 @@ namespace dawn_native { namespace vulkan {
mRecordingContext = CommandRecordingContext();
DAWN_TRY(PrepareRecordingContext());
- for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
- mDeleter->DeleteWhenUnused(semaphore);
- }
-
- for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
- mDeleter->DeleteWhenUnused(semaphore);
- }
-
return {};
}
@@ -348,6 +355,14 @@ namespace dawn_native { namespace vulkan {
extensionsToRequest.push_back(kExtensionNameKhrExternalMemoryFD);
usedKnobs.externalMemoryFD = true;
}
+ if (mDeviceInfo.externalMemoryDmaBuf) {
+ extensionsToRequest.push_back(kExtensionNameExtExternalMemoryDmaBuf);
+ usedKnobs.externalMemoryDmaBuf = true;
+ }
+ if (mDeviceInfo.imageDrmFormatModifier) {
+ extensionsToRequest.push_back(kExtensionNameExtImageDrmFormatModifier);
+ usedKnobs.imageDrmFormatModifier = true;
+ }
if (mDeviceInfo.externalMemoryZirconHandle) {
extensionsToRequest.push_back(kExtensionNameFuchsiaExternalMemory);
usedKnobs.externalMemoryZirconHandle = true;
@@ -388,8 +403,8 @@ namespace dawn_native { namespace vulkan {
// Find a universal queue family
{
- constexpr uint32_t kUniversalFlags =
- VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT;
+ // Note that GRAPHICS and COMPUTE imply TRANSFER so we don't need to check for it.
+ constexpr uint32_t kUniversalFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT;
int universalQueueFamily = -1;
for (unsigned int i = 0; i < mDeviceInfo.queueFamilies.size(); ++i) {
if ((mDeviceInfo.queueFamilies[i].queueFlags & kUniversalFlags) ==
@@ -572,7 +587,7 @@ namespace dawn_native { namespace vulkan {
// Insert pipeline barrier to ensure correct ordering with previous memory operations on the
// buffer.
- ToBackend(destination)->TransitionUsageNow(recordingContext, dawn::BufferUsage::CopyDst);
+ ToBackend(destination)->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
VkBufferCopy copy;
copy.srcOffset = sourceOffset;
@@ -588,6 +603,7 @@ namespace dawn_native { namespace vulkan {
MaybeError Device::ImportExternalImage(const ExternalImageDescriptor* descriptor,
ExternalMemoryHandle memoryHandle,
+ VkImage image,
const std::vector<ExternalSemaphoreHandle>& waitHandles,
VkSemaphore* outSignalSemaphore,
VkDeviceMemory* outAllocation,
@@ -599,7 +615,7 @@ namespace dawn_native { namespace vulkan {
if (!mExternalSemaphoreService->Supported()) {
return DAWN_VALIDATION_ERROR("External semaphore usage not supported");
}
- if (!mExternalMemoryService->Supported(
+ if (!mExternalMemoryService->SupportsImportMemory(
VulkanImageFormat(textureDescriptor->format), VK_IMAGE_TYPE_2D,
VK_IMAGE_TILING_OPTIMAL,
VulkanImageUsage(textureDescriptor->usage,
@@ -613,9 +629,11 @@ namespace dawn_native { namespace vulkan {
mExternalSemaphoreService->CreateExportableSemaphore());
// Import the external image's memory
+ external_memory::MemoryImportParams importParams;
+ DAWN_TRY_ASSIGN(importParams,
+ mExternalMemoryService->GetMemoryImportParams(descriptor, image));
DAWN_TRY_ASSIGN(*outAllocation,
- mExternalMemoryService->ImportMemory(
- memoryHandle, descriptor->allocationSize, descriptor->memoryTypeIndex));
+ mExternalMemoryService->ImportMemory(memoryHandle, importParams, image));
// Import semaphores we have to wait on before using the texture
for (const ExternalSemaphoreHandle& handle : waitHandles) {
@@ -664,11 +682,20 @@ namespace dawn_native { namespace vulkan {
// Cleanup in case of a failure, the image creation doesn't acquire the external objects
// if a failure happems.
Texture* result = nullptr;
- if (ConsumedError(ImportExternalImage(descriptor, memoryHandle, waitHandles,
- &signalSemaphore, &allocation, &waitSemaphores)) ||
- ConsumedError(Texture::CreateFromExternal(this, descriptor, textureDescriptor,
- signalSemaphore, allocation, waitSemaphores),
- &result)) {
+ // TODO(crbug.com/1026480): Consolidate this into a single CreateFromExternal call.
+ if (ConsumedError(Texture::CreateFromExternal(this, descriptor, textureDescriptor,
+ mExternalMemoryService.get()),
+ &result) ||
+ ConsumedError(ImportExternalImage(descriptor, memoryHandle, result->GetHandle(),
+ waitHandles, &signalSemaphore, &allocation,
+ &waitSemaphores)) ||
+ ConsumedError(result->BindExternalMemory(descriptor, signalSemaphore, allocation,
+ waitSemaphores))) {
+ // Delete the Texture if it was created
+ if (result != nullptr) {
+ delete result;
+ }
+
// Clear the signal semaphore
fn.DestroySemaphore(GetVkDevice(), signalSemaphore, nullptr);
@@ -688,20 +715,19 @@ namespace dawn_native { namespace vulkan {
ResultOrError<ResourceMemoryAllocation> Device::AllocateMemory(
VkMemoryRequirements requirements,
bool mappable) {
- // TODO(crbug.com/dawn/27): Support sub-allocation.
- ResourceMemoryAllocation allocation;
- DAWN_TRY_ASSIGN(allocation, mResourceAllocator->Allocate(requirements, mappable));
- return allocation;
+ return mResourceMemoryAllocator->Allocate(requirements, mappable);
}
- void Device::DeallocateMemory(ResourceMemoryAllocation& allocation) {
- if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
- return;
- }
- mResourceAllocator->Deallocate(allocation);
+ void Device::DeallocateMemory(ResourceMemoryAllocation* allocation) {
+ mResourceMemoryAllocator->Deallocate(allocation);
+ }
- // Invalidate the underlying resource heap in case the client accidentally
- // calls DeallocateMemory again using the same allocation.
- allocation.Invalidate();
+ int Device::FindBestMemoryTypeIndex(VkMemoryRequirements requirements, bool mappable) {
+ return mResourceMemoryAllocator->FindBestTypeIndex(requirements, mappable);
}
+
+ ResourceMemoryAllocator* Device::GetResourceMemoryAllocatorForTesting() const {
+ return mResourceMemoryAllocator.get();
+ }
+
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
index 52cf7673340..e5210d6b9f5 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
@@ -22,7 +22,6 @@
#include "dawn_native/Device.h"
#include "dawn_native/vulkan/CommandRecordingContext.h"
#include "dawn_native/vulkan/Forward.h"
-#include "dawn_native/vulkan/MemoryResourceAllocatorVk.h"
#include "dawn_native/vulkan/VulkanFunctions.h"
#include "dawn_native/vulkan/VulkanInfo.h"
@@ -36,11 +35,12 @@ namespace dawn_native { namespace vulkan {
class Adapter;
class BufferUploader;
+ class DescriptorSetService;
struct ExternalImageDescriptor;
class FencedDeleter;
class MapRequestTracker;
- class MemoryAllocator;
class RenderPassCache;
+ class ResourceMemoryAllocator;
class Device : public DeviceBase {
public:
@@ -59,9 +59,9 @@ namespace dawn_native { namespace vulkan {
VkQueue GetQueue() const;
BufferUploader* GetBufferUploader() const;
+ DescriptorSetService* GetDescriptorSetService() const;
FencedDeleter* GetFencedDeleter() const;
MapRequestTracker* GetMapRequestTracker() const;
- MemoryAllocator* GetMemoryAllocator() const;
RenderPassCache* GetRenderPassCache() const;
CommandRecordingContext* GetPendingRecordingContext();
@@ -77,7 +77,7 @@ namespace dawn_native { namespace vulkan {
ExternalSemaphoreHandle* outHandle);
// Dawn API
- CommandBufferBase* CreateCommandBuffer(CommandEncoderBase* encoder,
+ CommandBufferBase* CreateCommandBuffer(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) override;
Serial GetCompletedCommandSerial() const final override;
@@ -93,8 +93,11 @@ namespace dawn_native { namespace vulkan {
ResultOrError<ResourceMemoryAllocation> AllocateMemory(VkMemoryRequirements requirements,
bool mappable);
+ void DeallocateMemory(ResourceMemoryAllocation* allocation);
- void DeallocateMemory(ResourceMemoryAllocation& allocation);
+ int FindBestMemoryTypeIndex(VkMemoryRequirements requirements, bool mappable);
+
+ ResourceMemoryAllocator* GetResourceMemoryAllocatorForTesting() const;
private:
ResultOrError<BindGroupBase*> CreateBindGroupImpl(
@@ -133,11 +136,10 @@ namespace dawn_native { namespace vulkan {
uint32_t mQueueFamily = 0;
VkQueue mQueue = VK_NULL_HANDLE;
- std::unique_ptr<MemoryResourceAllocator> mResourceAllocator;
-
+ std::unique_ptr<DescriptorSetService> mDescriptorSetService;
std::unique_ptr<FencedDeleter> mDeleter;
std::unique_ptr<MapRequestTracker> mMapRequestTracker;
- std::unique_ptr<MemoryAllocator> mMemoryAllocator;
+ std::unique_ptr<ResourceMemoryAllocator> mResourceMemoryAllocator;
std::unique_ptr<RenderPassCache> mRenderPassCache;
std::unique_ptr<external_memory::Service> mExternalMemoryService;
@@ -171,6 +173,7 @@ namespace dawn_native { namespace vulkan {
MaybeError ImportExternalImage(const ExternalImageDescriptor* descriptor,
ExternalMemoryHandle memoryHandle,
+ VkImage image,
const std::vector<ExternalSemaphoreHandle>& waitHandles,
VkSemaphore* outSignalSemaphore,
VkDeviceMemory* outAllocation,
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h b/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h
index 4dd1c246f9e..9b5a7a1dc73 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/Forward.h
@@ -29,7 +29,7 @@ namespace dawn_native { namespace vulkan {
class PipelineLayout;
class Queue;
class RenderPipeline;
- class ResourceMemory;
+ class ResourceHeap;
class Sampler;
class ShaderModule;
class StagingBuffer;
@@ -48,7 +48,7 @@ namespace dawn_native { namespace vulkan {
using PipelineLayoutType = PipelineLayout;
using QueueType = Queue;
using RenderPipelineType = RenderPipeline;
- using ResourceHeapType = ResourceMemory;
+ using ResourceHeapType = ResourceHeap;
using SamplerType = Sampler;
using ShaderModuleType = ShaderModule;
using StagingBufferType = StagingBuffer;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryAllocator.cpp
deleted file mode 100644
index c977bde06cb..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryAllocator.cpp
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/MemoryAllocator.h"
-
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-
-namespace dawn_native { namespace vulkan {
-
- DeviceMemoryAllocation::~DeviceMemoryAllocation() {
- ASSERT(mMemory == VK_NULL_HANDLE);
- }
-
- VkDeviceMemory DeviceMemoryAllocation::GetMemory() const {
- return mMemory;
- }
-
- size_t DeviceMemoryAllocation::GetMemoryOffset() const {
- return mOffset;
- }
-
- uint8_t* DeviceMemoryAllocation::GetMappedPointer() const {
- return mMappedPointer;
- }
-
- MemoryAllocator::MemoryAllocator(Device* device) : mDevice(device) {
- }
-
- MemoryAllocator::~MemoryAllocator() {
- }
-
- int MemoryAllocator::FindBestTypeIndex(VkMemoryRequirements requirements, bool mappable) {
- const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
-
- // Find a suitable memory type for this allocation
- int bestType = -1;
- for (size_t i = 0; i < info.memoryTypes.size(); ++i) {
- // Resource must support this memory type
- if ((requirements.memoryTypeBits & (1 << i)) == 0) {
- continue;
- }
-
- // Mappable resource must be host visible
- if (mappable &&
- (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
- continue;
- }
-
- // Mappable must also be host coherent.
- if (mappable &&
- (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0) {
- continue;
- }
-
- // Found the first candidate memory type
- if (bestType == -1) {
- bestType = static_cast<int>(i);
- continue;
- }
-
- // For non-mappable resources, favor device local memory.
- if (!mappable) {
- if ((info.memoryTypes[bestType].propertyFlags &
- VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) == 0 &&
- (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) !=
- 0) {
- bestType = static_cast<int>(i);
- continue;
- }
- }
-
- // All things equal favor the memory in the biggest heap
- VkDeviceSize bestTypeHeapSize =
- info.memoryHeaps[info.memoryTypes[bestType].heapIndex].size;
- VkDeviceSize candidateHeapSize = info.memoryHeaps[info.memoryTypes[i].heapIndex].size;
- if (candidateHeapSize > bestTypeHeapSize) {
- bestType = static_cast<int>(i);
- continue;
- }
- }
-
- return bestType;
- }
-
- bool MemoryAllocator::Allocate(VkMemoryRequirements requirements,
- bool mappable,
- DeviceMemoryAllocation* allocation) {
- int bestType = FindBestTypeIndex(requirements, mappable);
- ASSERT(bestType >= 0);
-
- VkMemoryAllocateInfo allocateInfo;
- allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
- allocateInfo.pNext = nullptr;
- allocateInfo.allocationSize = requirements.size;
- allocateInfo.memoryTypeIndex = static_cast<uint32_t>(bestType);
-
- VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
- if (mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo, nullptr,
- &allocatedMemory) != VK_SUCCESS) {
- return false;
- }
-
- void* mappedPointer = nullptr;
- if (mappable) {
- if (mDevice->fn.MapMemory(mDevice->GetVkDevice(), allocatedMemory, 0, requirements.size,
- 0, &mappedPointer) != VK_SUCCESS) {
- return false;
- }
- }
-
- allocation->mMemory = allocatedMemory;
- allocation->mOffset = 0;
- allocation->mMappedPointer = static_cast<uint8_t*>(mappedPointer);
-
- return true;
- }
-
- void MemoryAllocator::Free(DeviceMemoryAllocation* allocation) {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(allocation->mMemory);
- allocation->mMemory = VK_NULL_HANDLE;
- allocation->mOffset = 0;
- allocation->mMappedPointer = nullptr;
- }
-
- void MemoryAllocator::Tick(Serial) {
- }
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryAllocator.h b/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryAllocator.h
deleted file mode 100644
index 56d3350f1d6..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryAllocator.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2017 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWNNATIVE_VULKAN_MEMORYALLOCATOR_H_
-#define DAWNNATIVE_VULKAN_MEMORYALLOCATOR_H_
-
-#include "common/SerialQueue.h"
-#include "common/vulkan_platform.h"
-
-namespace dawn_native { namespace vulkan {
-
- class Device;
- class MemoryAllocator;
-
- class DeviceMemoryAllocation {
- public:
- ~DeviceMemoryAllocation();
- VkDeviceMemory GetMemory() const;
- size_t GetMemoryOffset() const;
- uint8_t* GetMappedPointer() const;
-
- private:
- friend class MemoryAllocator;
- VkDeviceMemory mMemory = VK_NULL_HANDLE;
- size_t mOffset = 0;
- uint8_t* mMappedPointer = nullptr;
- };
-
- class MemoryAllocator {
- public:
- MemoryAllocator(Device* device);
- ~MemoryAllocator();
-
- int FindBestTypeIndex(VkMemoryRequirements requirements, bool mappable);
- bool Allocate(VkMemoryRequirements requirements,
- bool mappable,
- DeviceMemoryAllocation* allocation);
- void Free(DeviceMemoryAllocation* allocation);
-
- void Tick(Serial finishedSerial);
-
- private:
- Device* mDevice = nullptr;
- };
-
-}} // namespace dawn_native::vulkan
-
-#endif // DAWNNATIVE_VULKAN_MEMORYALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.cpp
deleted file mode 100644
index c86e6a1b782..00000000000
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_native/vulkan/DeviceVk.h"
-#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/ResourceMemoryVk.h"
-#include "dawn_native/vulkan/VulkanError.h"
-
-namespace dawn_native { namespace vulkan {
-
- MemoryResourceAllocator::MemoryResourceAllocator(Device* device) : mDevice(device) {
- }
-
- int MemoryResourceAllocator::FindBestTypeIndex(VkMemoryRequirements requirements,
- bool mappable) {
- const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
-
- // Find a suitable memory type for this allocation
- int bestType = -1;
- for (size_t i = 0; i < info.memoryTypes.size(); ++i) {
- // Resource must support this memory type
- if ((requirements.memoryTypeBits & (1 << i)) == 0) {
- continue;
- }
-
- // Mappable resource must be host visible
- if (mappable &&
- (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
- continue;
- }
-
- // Mappable must also be host coherent.
- if (mappable &&
- (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0) {
- continue;
- }
-
- // Found the first candidate memory type
- if (bestType == -1) {
- bestType = static_cast<int>(i);
- continue;
- }
-
- // For non-mappable resources, favor device local memory.
- if (!mappable) {
- if ((info.memoryTypes[bestType].propertyFlags &
- VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) == 0 &&
- (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) !=
- 0) {
- bestType = static_cast<int>(i);
- continue;
- }
- }
-
- // All things equal favor the memory in the biggest heap
- VkDeviceSize bestTypeHeapSize =
- info.memoryHeaps[info.memoryTypes[bestType].heapIndex].size;
- VkDeviceSize candidateHeapSize = info.memoryHeaps[info.memoryTypes[i].heapIndex].size;
- if (candidateHeapSize > bestTypeHeapSize) {
- bestType = static_cast<int>(i);
- continue;
- }
- }
-
- return bestType;
- }
-
- ResultOrError<ResourceMemoryAllocation> MemoryResourceAllocator::Allocate(
- VkMemoryRequirements requirements,
- bool mappable) {
- int bestType = FindBestTypeIndex(requirements, mappable);
-
- // TODO(cwallez@chromium.org): I think the Vulkan spec guarantees this should never
- // happen
- if (bestType == -1) {
- return DAWN_DEVICE_LOST_ERROR("Unable to find memory for requirements.");
- }
-
- VkMemoryAllocateInfo allocateInfo;
- allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
- allocateInfo.pNext = nullptr;
- allocateInfo.allocationSize = requirements.size;
- allocateInfo.memoryTypeIndex = static_cast<uint32_t>(bestType);
-
- VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
- DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
- nullptr, &allocatedMemory),
- "vkAllocateMemory"));
-
- void* mappedPointer = nullptr;
- if (mappable) {
- DAWN_TRY(CheckVkSuccess(mDevice->fn.MapMemory(mDevice->GetVkDevice(), allocatedMemory,
- 0, requirements.size, 0, &mappedPointer),
- "vkMapMemory"));
- }
-
- AllocationInfo info;
- info.mMethod = AllocationMethod::kDirect;
-
- return ResourceMemoryAllocation(info, /*offset*/ 0, new ResourceMemory(allocatedMemory),
- static_cast<uint8_t*>(mappedPointer));
- }
-
- void MemoryResourceAllocator::Deallocate(ResourceMemoryAllocation& allocation) {
- mDevice->GetFencedDeleter()->DeleteWhenUnused(
- ToBackend(allocation.GetResourceHeap())->GetMemory());
- }
-}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp
index bd7e499da60..e359d7033fb 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.cpp
@@ -52,7 +52,7 @@ namespace dawn_native { namespace vulkan {
// driver. Need to generalize
config->nativeFormat = VK_FORMAT_B8G8R8A8_UNORM;
config->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
- config->format = dawn::TextureFormat::BGRA8Unorm;
+ config->format = wgpu::TextureFormat::BGRA8Unorm;
config->minImageCount = 3;
// TODO(cwallez@chromium.org): This is upside down compared to what we want, at least
// on Linux
@@ -94,8 +94,8 @@ namespace dawn_native { namespace vulkan {
UpdateSurfaceConfig();
}
- DawnSwapChainError NativeSwapChainImpl::Configure(DawnTextureFormat format,
- DawnTextureUsage usage,
+ DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+ WGPUTextureUsage usage,
uint32_t width,
uint32_t height) {
UpdateSurfaceConfig();
@@ -105,7 +105,7 @@ namespace dawn_native { namespace vulkan {
ASSERT(mInfo.capabilities.minImageExtent.height <= height);
ASSERT(mInfo.capabilities.maxImageExtent.height >= height);
- ASSERT(format == static_cast<DawnTextureFormat>(GetPreferredFormat()));
+ ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
// TODO(cwallez@chromium.org): need to check usage works too
// Create the swapchain with the configuration we chose
@@ -121,7 +121,7 @@ namespace dawn_native { namespace vulkan {
createInfo.imageExtent.width = width;
createInfo.imageExtent.height = height;
createInfo.imageArrayLayers = 1;
- createInfo.imageUsage = VulkanImageUsage(static_cast<dawn::TextureUsage>(usage),
+ createInfo.imageUsage = VulkanImageUsage(static_cast<wgpu::TextureUsage>(usage),
mDevice->GetValidInternalFormat(mConfig.format));
createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
createInfo.queueFamilyIndexCount = 0;
@@ -236,7 +236,7 @@ namespace dawn_native { namespace vulkan {
return DAWN_SWAP_CHAIN_NO_ERROR;
}
- dawn::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+ wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
return mConfig.format;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.h
index c213cb45c11..fe7a1820f51 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/NativeSwapChainImplVk.h
@@ -32,18 +32,18 @@ namespace dawn_native { namespace vulkan {
~NativeSwapChainImpl();
void Init(DawnWSIContextVulkan* context);
- DawnSwapChainError Configure(DawnTextureFormat format,
- DawnTextureUsage,
+ DawnSwapChainError Configure(WGPUTextureFormat format,
+ WGPUTextureUsage,
uint32_t width,
uint32_t height);
DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
DawnSwapChainError Present();
- dawn::TextureFormat GetPreferredFormat() const;
+ wgpu::TextureFormat GetPreferredFormat() const;
struct ChosenConfig {
VkFormat nativeFormat;
- dawn::TextureFormat format;
+ wgpu::TextureFormat format;
VkColorSpaceKHR colorSpace;
VkSurfaceTransformFlagBitsKHR preTransform;
uint32_t minImageCount;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp
index 558927c52a7..19ab88c7e3f 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp
@@ -17,6 +17,8 @@
#include "dawn_native/vulkan/CommandBufferVk.h"
#include "dawn_native/vulkan/CommandRecordingContext.h"
#include "dawn_native/vulkan/DeviceVk.h"
+#include "dawn_platform/DawnPlatform.h"
+#include "dawn_platform/tracing/TraceEvent.h"
namespace dawn_native { namespace vulkan {
@@ -33,10 +35,13 @@ namespace dawn_native { namespace vulkan {
device->Tick();
+ TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording,
+ "CommandBufferVk::RecordCommands");
CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
for (uint32_t i = 0; i < commandCount; ++i) {
DAWN_TRY(ToBackend(commands[i])->RecordCommands(recordingContext));
}
+ TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferVk::RecordCommands");
DAWN_TRY(device->SubmitPendingCommands());
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp
index e6c79f1152c..1f3f940379a 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.cpp
@@ -23,11 +23,11 @@
namespace dawn_native { namespace vulkan {
namespace {
- VkAttachmentLoadOp VulkanAttachmentLoadOp(dawn::LoadOp op) {
+ VkAttachmentLoadOp VulkanAttachmentLoadOp(wgpu::LoadOp op) {
switch (op) {
- case dawn::LoadOp::Load:
+ case wgpu::LoadOp::Load:
return VK_ATTACHMENT_LOAD_OP_LOAD;
- case dawn::LoadOp::Clear:
+ case wgpu::LoadOp::Clear:
return VK_ATTACHMENT_LOAD_OP_CLEAR;
default:
UNREACHABLE();
@@ -38,8 +38,8 @@ namespace dawn_native { namespace vulkan {
// RenderPassCacheQuery
void RenderPassCacheQuery::SetColor(uint32_t index,
- dawn::TextureFormat format,
- dawn::LoadOp loadOp,
+ wgpu::TextureFormat format,
+ wgpu::LoadOp loadOp,
bool hasResolveTarget) {
colorMask.set(index);
colorFormats[index] = format;
@@ -47,9 +47,9 @@ namespace dawn_native { namespace vulkan {
resolveTargetMask[index] = hasResolveTarget;
}
- void RenderPassCacheQuery::SetDepthStencil(dawn::TextureFormat format,
- dawn::LoadOp depthLoadOp,
- dawn::LoadOp stencilLoadOp) {
+ void RenderPassCacheQuery::SetDepthStencil(wgpu::TextureFormat format,
+ wgpu::LoadOp depthLoadOp,
+ wgpu::LoadOp stencilLoadOp) {
hasDepthStencil = true;
depthStencilFormat = format;
this->depthLoadOp = depthLoadOp;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h
index 3a4eeee6e1b..6af675ba41c 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPassCache.h
@@ -35,23 +35,23 @@ namespace dawn_native { namespace vulkan {
// Use these helpers to build the query, they make sure all relevant data is initialized and
// masks set.
void SetColor(uint32_t index,
- dawn::TextureFormat format,
- dawn::LoadOp loadOp,
+ wgpu::TextureFormat format,
+ wgpu::LoadOp loadOp,
bool hasResolveTarget);
- void SetDepthStencil(dawn::TextureFormat format,
- dawn::LoadOp depthLoadOp,
- dawn::LoadOp stencilLoadOp);
+ void SetDepthStencil(wgpu::TextureFormat format,
+ wgpu::LoadOp depthLoadOp,
+ wgpu::LoadOp stencilLoadOp);
void SetSampleCount(uint32_t sampleCount);
std::bitset<kMaxColorAttachments> colorMask;
std::bitset<kMaxColorAttachments> resolveTargetMask;
- std::array<dawn::TextureFormat, kMaxColorAttachments> colorFormats;
- std::array<dawn::LoadOp, kMaxColorAttachments> colorLoadOp;
+ std::array<wgpu::TextureFormat, kMaxColorAttachments> colorFormats;
+ std::array<wgpu::LoadOp, kMaxColorAttachments> colorLoadOp;
bool hasDepthStencil = false;
- dawn::TextureFormat depthStencilFormat;
- dawn::LoadOp depthLoadOp;
- dawn::LoadOp stencilLoadOp;
+ wgpu::TextureFormat depthStencilFormat;
+ wgpu::LoadOp depthLoadOp;
+ wgpu::LoadOp stencilLoadOp;
uint32_t sampleCount;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
index 2e84df02de6..4b770a518aa 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
@@ -27,200 +27,200 @@ namespace dawn_native { namespace vulkan {
namespace {
- VkVertexInputRate VulkanInputRate(dawn::InputStepMode stepMode) {
+ VkVertexInputRate VulkanInputRate(wgpu::InputStepMode stepMode) {
switch (stepMode) {
- case dawn::InputStepMode::Vertex:
+ case wgpu::InputStepMode::Vertex:
return VK_VERTEX_INPUT_RATE_VERTEX;
- case dawn::InputStepMode::Instance:
+ case wgpu::InputStepMode::Instance:
return VK_VERTEX_INPUT_RATE_INSTANCE;
default:
UNREACHABLE();
}
}
- VkFormat VulkanVertexFormat(dawn::VertexFormat format) {
+ VkFormat VulkanVertexFormat(wgpu::VertexFormat format) {
switch (format) {
- case dawn::VertexFormat::UChar2:
+ case wgpu::VertexFormat::UChar2:
return VK_FORMAT_R8G8_UINT;
- case dawn::VertexFormat::UChar4:
+ case wgpu::VertexFormat::UChar4:
return VK_FORMAT_R8G8B8A8_UINT;
- case dawn::VertexFormat::Char2:
+ case wgpu::VertexFormat::Char2:
return VK_FORMAT_R8G8_SINT;
- case dawn::VertexFormat::Char4:
+ case wgpu::VertexFormat::Char4:
return VK_FORMAT_R8G8B8A8_SINT;
- case dawn::VertexFormat::UChar2Norm:
+ case wgpu::VertexFormat::UChar2Norm:
return VK_FORMAT_R8G8_UNORM;
- case dawn::VertexFormat::UChar4Norm:
+ case wgpu::VertexFormat::UChar4Norm:
return VK_FORMAT_R8G8B8A8_UNORM;
- case dawn::VertexFormat::Char2Norm:
+ case wgpu::VertexFormat::Char2Norm:
return VK_FORMAT_R8G8_SNORM;
- case dawn::VertexFormat::Char4Norm:
+ case wgpu::VertexFormat::Char4Norm:
return VK_FORMAT_R8G8B8A8_SNORM;
- case dawn::VertexFormat::UShort2:
+ case wgpu::VertexFormat::UShort2:
return VK_FORMAT_R16G16_UINT;
- case dawn::VertexFormat::UShort4:
+ case wgpu::VertexFormat::UShort4:
return VK_FORMAT_R16G16B16A16_UINT;
- case dawn::VertexFormat::Short2:
+ case wgpu::VertexFormat::Short2:
return VK_FORMAT_R16G16_SINT;
- case dawn::VertexFormat::Short4:
+ case wgpu::VertexFormat::Short4:
return VK_FORMAT_R16G16B16A16_SINT;
- case dawn::VertexFormat::UShort2Norm:
+ case wgpu::VertexFormat::UShort2Norm:
return VK_FORMAT_R16G16_UNORM;
- case dawn::VertexFormat::UShort4Norm:
+ case wgpu::VertexFormat::UShort4Norm:
return VK_FORMAT_R16G16B16A16_UNORM;
- case dawn::VertexFormat::Short2Norm:
+ case wgpu::VertexFormat::Short2Norm:
return VK_FORMAT_R16G16_SNORM;
- case dawn::VertexFormat::Short4Norm:
+ case wgpu::VertexFormat::Short4Norm:
return VK_FORMAT_R16G16B16A16_SNORM;
- case dawn::VertexFormat::Half2:
+ case wgpu::VertexFormat::Half2:
return VK_FORMAT_R16G16_SFLOAT;
- case dawn::VertexFormat::Half4:
+ case wgpu::VertexFormat::Half4:
return VK_FORMAT_R16G16B16A16_SFLOAT;
- case dawn::VertexFormat::Float:
+ case wgpu::VertexFormat::Float:
return VK_FORMAT_R32_SFLOAT;
- case dawn::VertexFormat::Float2:
+ case wgpu::VertexFormat::Float2:
return VK_FORMAT_R32G32_SFLOAT;
- case dawn::VertexFormat::Float3:
+ case wgpu::VertexFormat::Float3:
return VK_FORMAT_R32G32B32_SFLOAT;
- case dawn::VertexFormat::Float4:
+ case wgpu::VertexFormat::Float4:
return VK_FORMAT_R32G32B32A32_SFLOAT;
- case dawn::VertexFormat::UInt:
+ case wgpu::VertexFormat::UInt:
return VK_FORMAT_R32_UINT;
- case dawn::VertexFormat::UInt2:
+ case wgpu::VertexFormat::UInt2:
return VK_FORMAT_R32G32_UINT;
- case dawn::VertexFormat::UInt3:
+ case wgpu::VertexFormat::UInt3:
return VK_FORMAT_R32G32B32_UINT;
- case dawn::VertexFormat::UInt4:
+ case wgpu::VertexFormat::UInt4:
return VK_FORMAT_R32G32B32A32_UINT;
- case dawn::VertexFormat::Int:
+ case wgpu::VertexFormat::Int:
return VK_FORMAT_R32_SINT;
- case dawn::VertexFormat::Int2:
+ case wgpu::VertexFormat::Int2:
return VK_FORMAT_R32G32_SINT;
- case dawn::VertexFormat::Int3:
+ case wgpu::VertexFormat::Int3:
return VK_FORMAT_R32G32B32_SINT;
- case dawn::VertexFormat::Int4:
+ case wgpu::VertexFormat::Int4:
return VK_FORMAT_R32G32B32A32_SINT;
default:
UNREACHABLE();
}
}
- VkPrimitiveTopology VulkanPrimitiveTopology(dawn::PrimitiveTopology topology) {
+ VkPrimitiveTopology VulkanPrimitiveTopology(wgpu::PrimitiveTopology topology) {
switch (topology) {
- case dawn::PrimitiveTopology::PointList:
+ case wgpu::PrimitiveTopology::PointList:
return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
- case dawn::PrimitiveTopology::LineList:
+ case wgpu::PrimitiveTopology::LineList:
return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
- case dawn::PrimitiveTopology::LineStrip:
+ case wgpu::PrimitiveTopology::LineStrip:
return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
- case dawn::PrimitiveTopology::TriangleList:
+ case wgpu::PrimitiveTopology::TriangleList:
return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
- case dawn::PrimitiveTopology::TriangleStrip:
+ case wgpu::PrimitiveTopology::TriangleStrip:
return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
default:
UNREACHABLE();
}
}
- bool ShouldEnablePrimitiveRestart(dawn::PrimitiveTopology topology) {
+ bool ShouldEnablePrimitiveRestart(wgpu::PrimitiveTopology topology) {
// Primitive restart is always enabled in WebGPU but Vulkan validation rules ask that
// primitive restart be only enabled on primitive topologies that support restarting.
switch (topology) {
- case dawn::PrimitiveTopology::PointList:
- case dawn::PrimitiveTopology::LineList:
- case dawn::PrimitiveTopology::TriangleList:
+ case wgpu::PrimitiveTopology::PointList:
+ case wgpu::PrimitiveTopology::LineList:
+ case wgpu::PrimitiveTopology::TriangleList:
return false;
- case dawn::PrimitiveTopology::LineStrip:
- case dawn::PrimitiveTopology::TriangleStrip:
+ case wgpu::PrimitiveTopology::LineStrip:
+ case wgpu::PrimitiveTopology::TriangleStrip:
return true;
default:
UNREACHABLE();
}
}
- VkFrontFace VulkanFrontFace(dawn::FrontFace face) {
+ VkFrontFace VulkanFrontFace(wgpu::FrontFace face) {
switch (face) {
- case dawn::FrontFace::CCW:
+ case wgpu::FrontFace::CCW:
return VK_FRONT_FACE_COUNTER_CLOCKWISE;
- case dawn::FrontFace::CW:
+ case wgpu::FrontFace::CW:
return VK_FRONT_FACE_CLOCKWISE;
}
}
- VkCullModeFlagBits VulkanCullMode(dawn::CullMode mode) {
+ VkCullModeFlagBits VulkanCullMode(wgpu::CullMode mode) {
switch (mode) {
- case dawn::CullMode::None:
+ case wgpu::CullMode::None:
return VK_CULL_MODE_NONE;
- case dawn::CullMode::Front:
+ case wgpu::CullMode::Front:
return VK_CULL_MODE_FRONT_BIT;
- case dawn::CullMode::Back:
+ case wgpu::CullMode::Back:
return VK_CULL_MODE_BACK_BIT;
}
}
- VkBlendFactor VulkanBlendFactor(dawn::BlendFactor factor) {
+ VkBlendFactor VulkanBlendFactor(wgpu::BlendFactor factor) {
switch (factor) {
- case dawn::BlendFactor::Zero:
+ case wgpu::BlendFactor::Zero:
return VK_BLEND_FACTOR_ZERO;
- case dawn::BlendFactor::One:
+ case wgpu::BlendFactor::One:
return VK_BLEND_FACTOR_ONE;
- case dawn::BlendFactor::SrcColor:
+ case wgpu::BlendFactor::SrcColor:
return VK_BLEND_FACTOR_SRC_COLOR;
- case dawn::BlendFactor::OneMinusSrcColor:
+ case wgpu::BlendFactor::OneMinusSrcColor:
return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
- case dawn::BlendFactor::SrcAlpha:
+ case wgpu::BlendFactor::SrcAlpha:
return VK_BLEND_FACTOR_SRC_ALPHA;
- case dawn::BlendFactor::OneMinusSrcAlpha:
+ case wgpu::BlendFactor::OneMinusSrcAlpha:
return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
- case dawn::BlendFactor::DstColor:
+ case wgpu::BlendFactor::DstColor:
return VK_BLEND_FACTOR_DST_COLOR;
- case dawn::BlendFactor::OneMinusDstColor:
+ case wgpu::BlendFactor::OneMinusDstColor:
return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
- case dawn::BlendFactor::DstAlpha:
+ case wgpu::BlendFactor::DstAlpha:
return VK_BLEND_FACTOR_DST_ALPHA;
- case dawn::BlendFactor::OneMinusDstAlpha:
+ case wgpu::BlendFactor::OneMinusDstAlpha:
return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
- case dawn::BlendFactor::SrcAlphaSaturated:
+ case wgpu::BlendFactor::SrcAlphaSaturated:
return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
- case dawn::BlendFactor::BlendColor:
+ case wgpu::BlendFactor::BlendColor:
return VK_BLEND_FACTOR_CONSTANT_COLOR;
- case dawn::BlendFactor::OneMinusBlendColor:
+ case wgpu::BlendFactor::OneMinusBlendColor:
return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
default:
UNREACHABLE();
}
}
- VkBlendOp VulkanBlendOperation(dawn::BlendOperation operation) {
+ VkBlendOp VulkanBlendOperation(wgpu::BlendOperation operation) {
switch (operation) {
- case dawn::BlendOperation::Add:
+ case wgpu::BlendOperation::Add:
return VK_BLEND_OP_ADD;
- case dawn::BlendOperation::Subtract:
+ case wgpu::BlendOperation::Subtract:
return VK_BLEND_OP_SUBTRACT;
- case dawn::BlendOperation::ReverseSubtract:
+ case wgpu::BlendOperation::ReverseSubtract:
return VK_BLEND_OP_REVERSE_SUBTRACT;
- case dawn::BlendOperation::Min:
+ case wgpu::BlendOperation::Min:
return VK_BLEND_OP_MIN;
- case dawn::BlendOperation::Max:
+ case wgpu::BlendOperation::Max:
return VK_BLEND_OP_MAX;
default:
UNREACHABLE();
}
}
- VkColorComponentFlags VulkanColorWriteMask(dawn::ColorWriteMask mask,
+ VkColorComponentFlags VulkanColorWriteMask(wgpu::ColorWriteMask mask,
bool isDeclaredInFragmentShader) {
// Vulkan and Dawn color write masks match, static assert it and return the mask
- static_assert(static_cast<VkColorComponentFlagBits>(dawn::ColorWriteMask::Red) ==
+ static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Red) ==
VK_COLOR_COMPONENT_R_BIT,
"");
- static_assert(static_cast<VkColorComponentFlagBits>(dawn::ColorWriteMask::Green) ==
+ static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Green) ==
VK_COLOR_COMPONENT_G_BIT,
"");
- static_assert(static_cast<VkColorComponentFlagBits>(dawn::ColorWriteMask::Blue) ==
+ static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Blue) ==
VK_COLOR_COMPONENT_B_BIT,
"");
- static_assert(static_cast<VkColorComponentFlagBits>(dawn::ColorWriteMask::Alpha) ==
+ static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Alpha) ==
VK_COLOR_COMPONENT_A_BIT,
"");
@@ -247,23 +247,23 @@ namespace dawn_native { namespace vulkan {
return attachment;
}
- VkStencilOp VulkanStencilOp(dawn::StencilOperation op) {
+ VkStencilOp VulkanStencilOp(wgpu::StencilOperation op) {
switch (op) {
- case dawn::StencilOperation::Keep:
+ case wgpu::StencilOperation::Keep:
return VK_STENCIL_OP_KEEP;
- case dawn::StencilOperation::Zero:
+ case wgpu::StencilOperation::Zero:
return VK_STENCIL_OP_ZERO;
- case dawn::StencilOperation::Replace:
+ case wgpu::StencilOperation::Replace:
return VK_STENCIL_OP_REPLACE;
- case dawn::StencilOperation::IncrementClamp:
+ case wgpu::StencilOperation::IncrementClamp:
return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
- case dawn::StencilOperation::DecrementClamp:
+ case wgpu::StencilOperation::DecrementClamp:
return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
- case dawn::StencilOperation::Invert:
+ case wgpu::StencilOperation::Invert:
return VK_STENCIL_OP_INVERT;
- case dawn::StencilOperation::IncrementWrap:
+ case wgpu::StencilOperation::IncrementWrap:
return VK_STENCIL_OP_INCREMENT_AND_WRAP;
- case dawn::StencilOperation::DecrementWrap:
+ case wgpu::StencilOperation::DecrementWrap:
return VK_STENCIL_OP_DECREMENT_AND_WRAP;
default:
UNREACHABLE();
@@ -279,7 +279,7 @@ namespace dawn_native { namespace vulkan {
// Depth writes only occur if depth is enabled
depthStencilState.depthTestEnable =
- (descriptor->depthCompare == dawn::CompareFunction::Always &&
+ (descriptor->depthCompare == wgpu::CompareFunction::Always &&
!descriptor->depthWriteEnabled)
? VK_FALSE
: VK_TRUE;
@@ -351,11 +351,9 @@ namespace dawn_native { namespace vulkan {
shaderStages[1].pName = descriptor->fragmentStage->entryPoint;
}
- std::array<VkVertexInputBindingDescription, kMaxVertexBuffers> mBindings;
- std::array<VkVertexInputAttributeDescription, kMaxVertexAttributes> mAttributes;
- const VertexInputDescriptor* vertexInput = GetVertexInputDescriptor();
+ PipelineVertexInputStateCreateInfoTemporaryAllocations tempAllocations;
VkPipelineVertexInputStateCreateInfo vertexInputCreateInfo =
- ComputeVertexInputDesc(vertexInput, &mBindings, &mAttributes);
+ ComputeVertexInputDesc(&tempAllocations);
VkPipelineInputAssemblyStateCreateInfo inputAssembly;
inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
@@ -464,12 +462,12 @@ namespace dawn_native { namespace vulkan {
RenderPassCacheQuery query;
for (uint32_t i : IterateBitSet(GetColorAttachmentsMask())) {
- query.SetColor(i, GetColorAttachmentFormat(i), dawn::LoadOp::Load, false);
+ query.SetColor(i, GetColorAttachmentFormat(i), wgpu::LoadOp::Load, false);
}
if (HasDepthStencilAttachment()) {
- query.SetDepthStencil(GetDepthStencilFormat(), dawn::LoadOp::Load,
- dawn::LoadOp::Load);
+ query.SetDepthStencil(GetDepthStencilFormat(), wgpu::LoadOp::Load,
+ wgpu::LoadOp::Load);
}
query.SetSampleCount(GetSampleCount());
@@ -507,32 +505,31 @@ namespace dawn_native { namespace vulkan {
}
VkPipelineVertexInputStateCreateInfo RenderPipeline::ComputeVertexInputDesc(
- const VertexInputDescriptor* vertexInput,
- std::array<VkVertexInputBindingDescription, kMaxVertexBuffers>* mBindings,
- std::array<VkVertexInputAttributeDescription, kMaxVertexAttributes>* mAttributes) {
+ PipelineVertexInputStateCreateInfoTemporaryAllocations* tempAllocations) {
// Fill in the "binding info" that will be chained in the create info
uint32_t bindingCount = 0;
- for (uint32_t i : IterateBitSet(GetInputsSetMask())) {
- const auto& bindingInfo = GetInput(i);
+ for (uint32_t i : IterateBitSet(GetVertexBufferSlotsUsed())) {
+ const VertexBufferInfo& bindingInfo = GetVertexBuffer(i);
- auto& bindingDesc = (*mBindings)[bindingCount];
- bindingDesc.binding = i;
- bindingDesc.stride = bindingInfo.stride;
- bindingDesc.inputRate = VulkanInputRate(bindingInfo.stepMode);
+ VkVertexInputBindingDescription* bindingDesc = &tempAllocations->bindings[bindingCount];
+ bindingDesc->binding = i;
+ bindingDesc->stride = bindingInfo.arrayStride;
+ bindingDesc->inputRate = VulkanInputRate(bindingInfo.stepMode);
bindingCount++;
}
// Fill in the "attribute info" that will be chained in the create info
uint32_t attributeCount = 0;
- for (uint32_t i : IterateBitSet(GetAttributesSetMask())) {
- const auto& attributeInfo = GetAttribute(i);
+ for (uint32_t i : IterateBitSet(GetAttributeLocationsUsed())) {
+ const VertexAttributeInfo& attributeInfo = GetAttribute(i);
- auto& attributeDesc = (*mAttributes)[attributeCount];
- attributeDesc.location = i;
- attributeDesc.binding = attributeInfo.inputSlot;
- attributeDesc.format = VulkanVertexFormat(attributeInfo.format);
- attributeDesc.offset = attributeInfo.offset;
+ VkVertexInputAttributeDescription* attributeDesc =
+ &tempAllocations->attributes[attributeCount];
+ attributeDesc->location = i;
+ attributeDesc->binding = attributeInfo.vertexBufferSlot;
+ attributeDesc->format = VulkanVertexFormat(attributeInfo.format);
+ attributeDesc->offset = attributeInfo.offset;
attributeCount++;
}
@@ -543,9 +540,9 @@ namespace dawn_native { namespace vulkan {
mCreateInfo.pNext = nullptr;
mCreateInfo.flags = 0;
mCreateInfo.vertexBindingDescriptionCount = bindingCount;
- mCreateInfo.pVertexBindingDescriptions = &(*mBindings)[0];
+ mCreateInfo.pVertexBindingDescriptions = tempAllocations->bindings.data();
mCreateInfo.vertexAttributeDescriptionCount = attributeCount;
- mCreateInfo.pVertexAttributeDescriptions = &(*mAttributes)[0];
+ mCreateInfo.pVertexAttributeDescriptions = tempAllocations->attributes.data();
return mCreateInfo;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h
index 9d2d300dd5a..6c61a6e5bc3 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.h
@@ -36,10 +36,12 @@ namespace dawn_native { namespace vulkan {
using RenderPipelineBase::RenderPipelineBase;
MaybeError Initialize(const RenderPipelineDescriptor* descriptor);
+ struct PipelineVertexInputStateCreateInfoTemporaryAllocations {
+ std::array<VkVertexInputBindingDescription, kMaxVertexBuffers> bindings;
+ std::array<VkVertexInputAttributeDescription, kMaxVertexAttributes> attributes;
+ };
VkPipelineVertexInputStateCreateInfo ComputeVertexInputDesc(
- const VertexInputDescriptor* vertexInput,
- std::array<VkVertexInputBindingDescription, kMaxVertexBuffers>* mBindings,
- std::array<VkVertexInputAttributeDescription, kMaxVertexAttributes>* mAttributes);
+ PipelineVertexInputStateCreateInfoTemporaryAllocations* temporaryAllocations);
VkPipeline mHandle = VK_NULL_HANDLE;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.cpp
new file mode 100644
index 00000000000..bf3b947bd44
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.cpp
@@ -0,0 +1,31 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/vulkan/ResourceHeapVk.h"
+
+namespace dawn_native { namespace vulkan {
+
+ ResourceHeap::ResourceHeap(VkDeviceMemory memory, size_t memoryType)
+ : mMemory(memory), mMemoryType(memoryType) {
+ }
+
+ VkDeviceMemory ResourceHeap::GetMemory() const {
+ return mMemory;
+ }
+
+ size_t ResourceHeap::GetMemoryType() const {
+ return mMemoryType;
+ }
+
+}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.h
index eab8b3202be..2bb909b5c8f 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceHeapVk.h
@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#ifndef DAWNNATIVE_VULKAN_RESOURCEMEMORYVK_H_
-#define DAWNNATIVE_VULKAN_RESOURCEMEMORYVK_H_
+#ifndef DAWNNATIVE_VULKAN_RESOURCEHEAPVK_H_
+#define DAWNNATIVE_VULKAN_RESOURCEHEAPVK_H_
#include "common/vulkan_platform.h"
#include "dawn_native/ResourceHeap.h"
@@ -21,16 +21,19 @@
namespace dawn_native { namespace vulkan {
// Wrapper for physical memory used with or without a resource object.
- class ResourceMemory : public ResourceHeapBase {
+ class ResourceHeap : public ResourceHeapBase {
public:
- ResourceMemory(VkDeviceMemory memory);
- ~ResourceMemory() = default;
+ ResourceHeap(VkDeviceMemory memory, size_t memoryType);
+ ~ResourceHeap() = default;
VkDeviceMemory GetMemory() const;
+ size_t GetMemoryType() const;
private:
VkDeviceMemory mMemory = VK_NULL_HANDLE;
+ size_t mMemoryType = 0;
};
+
}} // namespace dawn_native::vulkan
-#endif // DAWNNATIVE_VULKAN_RESOURCEMEMORYVK_H_ \ No newline at end of file
+#endif // DAWNNATIVE_VULKAN_RESOURCEHEAPVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp
new file mode 100644
index 00000000000..58dcd666d6c
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp
@@ -0,0 +1,243 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/vulkan/ResourceMemoryAllocatorVk.h"
+
+#include "dawn_native/BuddyMemoryAllocator.h"
+#include "dawn_native/ResourceHeapAllocator.h"
+#include "dawn_native/vulkan/DeviceVk.h"
+#include "dawn_native/vulkan/FencedDeleter.h"
+#include "dawn_native/vulkan/ResourceHeapVk.h"
+#include "dawn_native/vulkan/VulkanError.h"
+
+namespace dawn_native { namespace vulkan {
+
+ namespace {
+
+        // TODO(cwallez@chromium.org): This is a hardcoded heuristic to choose when to
+ // suballocate but it should ideally depend on the size of the memory heaps and other
+ // factors.
+ constexpr uint64_t kMaxBuddySystemSize = 32ull * 1024ull * 1024ull * 1024ull; // 32GB
+ constexpr uint64_t kMaxSizeForSubAllocation = 4ull * 1024ull * 1024ull; // 4MB
+
+ // Have each bucket of the buddy system allocate at least some resource of the maximum
+ // size
+ constexpr uint64_t kBuddyHeapsSize = 2 * kMaxSizeForSubAllocation;
+
+ } // anonymous namespace
+
+ // SingleTypeAllocator is a combination of a BuddyMemoryAllocator and its client and can
+ // service suballocation requests, but for a single Vulkan memory type.
+
+ class ResourceMemoryAllocator::SingleTypeAllocator : public ResourceHeapAllocator {
+ public:
+ SingleTypeAllocator(Device* device, size_t memoryTypeIndex)
+ : mDevice(device),
+ mMemoryTypeIndex(memoryTypeIndex),
+ mBuddySystem(kMaxBuddySystemSize, kBuddyHeapsSize, this) {
+ }
+ ~SingleTypeAllocator() override = default;
+
+ ResultOrError<ResourceMemoryAllocation> AllocateMemory(
+ const VkMemoryRequirements& requirements) {
+ return mBuddySystem.Allocate(requirements.size, requirements.alignment);
+ }
+
+ void DeallocateMemory(const ResourceMemoryAllocation& allocation) {
+ mBuddySystem.Deallocate(allocation);
+ }
+
+        // Implementation of the ResourceHeapAllocator interface to be a client of BuddyMemoryAllocator
+
+ ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
+ uint64_t size) override {
+ VkMemoryAllocateInfo allocateInfo;
+ allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ allocateInfo.pNext = nullptr;
+ allocateInfo.allocationSize = size;
+ allocateInfo.memoryTypeIndex = mMemoryTypeIndex;
+
+ VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+ VkResult allocationResult = mDevice->fn.AllocateMemory(
+ mDevice->GetVkDevice(), &allocateInfo, nullptr, &allocatedMemory);
+
+ // Handle vkAllocateMemory error but differentiate OOM that we want to surface to
+ // the application.
+ if (allocationResult == VK_ERROR_OUT_OF_DEVICE_MEMORY) {
+ return DAWN_OUT_OF_MEMORY_ERROR("OOM while creating the Vkmemory");
+ }
+ DAWN_TRY(CheckVkSuccess(allocationResult, "vkAllocateMemory"));
+
+ ASSERT(allocatedMemory != VK_NULL_HANDLE);
+ return {std::make_unique<ResourceHeap>(allocatedMemory, mMemoryTypeIndex)};
+ }
+
+ void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(ToBackend(allocation.get())->GetMemory());
+ }
+
+ private:
+ Device* mDevice;
+ size_t mMemoryTypeIndex;
+ BuddyMemoryAllocator mBuddySystem;
+ };
+
+ // Implementation of ResourceMemoryAllocator
+
+ ResourceMemoryAllocator::ResourceMemoryAllocator(Device* device) : mDevice(device) {
+ const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
+ mAllocatorsPerType.reserve(info.memoryTypes.size());
+
+ for (size_t i = 0; i < info.memoryTypes.size(); i++) {
+ mAllocatorsPerType.emplace_back(std::make_unique<SingleTypeAllocator>(mDevice, i));
+ }
+ }
+
+ ResourceMemoryAllocator::~ResourceMemoryAllocator() = default;
+
+ ResultOrError<ResourceMemoryAllocation> ResourceMemoryAllocator::Allocate(
+ const VkMemoryRequirements& requirements,
+ bool mappable) {
+        // The Vulkan spec guarantees at least one memory type is valid.
+ int memoryType = FindBestTypeIndex(requirements, mappable);
+ ASSERT(memoryType >= 0);
+
+ VkDeviceSize size = requirements.size;
+
+ // If the resource is too big, allocate memory just for it.
+ // Also allocate mappable resources separately because at the moment the mapped pointer
+ // is part of the resource and not the heap, which doesn't match the Vulkan model.
+ // TODO(cwallez@chromium.org): allow sub-allocating mappable resources, maybe.
+ if (requirements.size >= kMaxSizeForSubAllocation || mappable) {
+ std::unique_ptr<ResourceHeapBase> resourceHeap;
+ DAWN_TRY_ASSIGN(resourceHeap,
+ mAllocatorsPerType[memoryType]->AllocateResourceHeap(size));
+
+ void* mappedPointer = nullptr;
+ if (mappable) {
+ DAWN_TRY(
+ CheckVkSuccess(mDevice->fn.MapMemory(mDevice->GetVkDevice(),
+ ToBackend(resourceHeap.get())->GetMemory(),
+ 0, size, 0, &mappedPointer),
+ "vkMapMemory"));
+ }
+
+ AllocationInfo info;
+ info.mMethod = AllocationMethod::kDirect;
+ return ResourceMemoryAllocation(info, /*offset*/ 0, resourceHeap.release(),
+ static_cast<uint8_t*>(mappedPointer));
+ } else {
+ return mAllocatorsPerType[memoryType]->AllocateMemory(requirements);
+ }
+ }
+
+ void ResourceMemoryAllocator::Deallocate(ResourceMemoryAllocation* allocation) {
+ switch (allocation->GetInfo().mMethod) {
+ // Some memory allocation can never be initialized, for example when wrapping
+ // swapchain VkImages with a Texture.
+ case AllocationMethod::kInvalid:
+ break;
+
+ // For direct allocation we can put the memory for deletion immediately and the fence
+ // deleter will make sure the resources are freed before the memory.
+ case AllocationMethod::kDirect:
+ mDevice->GetFencedDeleter()->DeleteWhenUnused(
+ ToBackend(allocation->GetResourceHeap())->GetMemory());
+ break;
+
+ // Suballocations aren't freed immediately, otherwise another resource allocation could
+ // happen just after that aliases the old one and would require a barrier.
+ // TODO(cwallez@chromium.org): Maybe we can produce the correct barriers to reduce the
+ // latency to reclaim memory.
+ case AllocationMethod::kSubAllocated:
+ mSubAllocationsToDelete.Enqueue(*allocation, mDevice->GetPendingCommandSerial());
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // Invalidate the underlying resource heap in case the client accidentally
+ // calls DeallocateMemory again using the same allocation.
+ allocation->Invalidate();
+ }
+
+ void ResourceMemoryAllocator::Tick(Serial completedSerial) {
+ for (const ResourceMemoryAllocation& allocation :
+ mSubAllocationsToDelete.IterateUpTo(completedSerial)) {
+ ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
+ size_t memoryType = ToBackend(allocation.GetResourceHeap())->GetMemoryType();
+
+ mAllocatorsPerType[memoryType]->DeallocateMemory(allocation);
+ }
+
+ mSubAllocationsToDelete.ClearUpTo(completedSerial);
+ }
+
+ int ResourceMemoryAllocator::FindBestTypeIndex(VkMemoryRequirements requirements,
+ bool mappable) {
+ const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
+
+ // Find a suitable memory type for this allocation
+ int bestType = -1;
+ for (size_t i = 0; i < info.memoryTypes.size(); ++i) {
+ // Resource must support this memory type
+ if ((requirements.memoryTypeBits & (1 << i)) == 0) {
+ continue;
+ }
+
+ // Mappable resource must be host visible
+ if (mappable &&
+ (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
+ continue;
+ }
+
+ // Mappable must also be host coherent.
+ if (mappable &&
+ (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0) {
+ continue;
+ }
+
+ // Found the first candidate memory type
+ if (bestType == -1) {
+ bestType = static_cast<int>(i);
+ continue;
+ }
+
+ // For non-mappable resources, favor device local memory.
+ if (!mappable) {
+ if ((info.memoryTypes[bestType].propertyFlags &
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) == 0 &&
+ (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) !=
+ 0) {
+ bestType = static_cast<int>(i);
+ continue;
+ }
+ }
+
+ // All things equal favor the memory in the biggest heap
+ VkDeviceSize bestTypeHeapSize =
+ info.memoryHeaps[info.memoryTypes[bestType].heapIndex].size;
+ VkDeviceSize candidateHeapSize = info.memoryHeaps[info.memoryTypes[i].heapIndex].size;
+ if (candidateHeapSize > bestTypeHeapSize) {
+ bestType = static_cast<int>(i);
+ continue;
+ }
+ }
+
+ return bestType;
+ }
+
+}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.h
index b26d12a50b9..88f6d4e0c66 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/MemoryResourceAllocatorVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.h
@@ -12,32 +12,43 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#ifndef DAWNNATIVE_VULKAN_MEMORYRESOURCEALLOCATORVK_H_
-#define DAWNNATIVE_VULKAN_MEMORYRESOURCEALLOCATORVK_H_
+#ifndef DAWNNATIVE_VULKAN_RESOURCEMEMORYALLOCATORVK_H_
+#define DAWNNATIVE_VULKAN_RESOURCEMEMORYALLOCATORVK_H_
+#include "common/SerialQueue.h"
#include "common/vulkan_platform.h"
#include "dawn_native/Error.h"
#include "dawn_native/ResourceMemoryAllocation.h"
+#include <memory>
+#include <vector>
+
namespace dawn_native { namespace vulkan {
class Device;
- class MemoryResourceAllocator {
+ class ResourceMemoryAllocator {
public:
- MemoryResourceAllocator(Device* device);
- ~MemoryResourceAllocator() = default;
+ ResourceMemoryAllocator(Device* device);
+ ~ResourceMemoryAllocator();
- ResultOrError<ResourceMemoryAllocation> Allocate(VkMemoryRequirements requirements,
+ ResultOrError<ResourceMemoryAllocation> Allocate(const VkMemoryRequirements& requirements,
bool mappable);
- void Deallocate(ResourceMemoryAllocation& allocation);
+ void Deallocate(ResourceMemoryAllocation* allocation);
+
+ void Tick(Serial completedSerial);
- private:
int FindBestTypeIndex(VkMemoryRequirements requirements, bool mappable);
+ private:
Device* mDevice;
+
+ class SingleTypeAllocator;
+ std::vector<std::unique_ptr<SingleTypeAllocator>> mAllocatorsPerType;
+
+ SerialQueue<ResourceMemoryAllocation> mSubAllocationsToDelete;
};
}} // namespace dawn_native::vulkan
-#endif // DAWNNATIVE_VULKAN_MEMORYRESOURCEALLOCATORVK_H_
+#endif // DAWNNATIVE_VULKAN_RESOURCEMEMORYALLOCATORVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
index 633fb5dde4b..05baf71fc92 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.cpp
@@ -22,35 +22,35 @@
namespace dawn_native { namespace vulkan {
namespace {
- VkSamplerAddressMode VulkanSamplerAddressMode(dawn::AddressMode mode) {
+ VkSamplerAddressMode VulkanSamplerAddressMode(wgpu::AddressMode mode) {
switch (mode) {
- case dawn::AddressMode::Repeat:
+ case wgpu::AddressMode::Repeat:
return VK_SAMPLER_ADDRESS_MODE_REPEAT;
- case dawn::AddressMode::MirrorRepeat:
+ case wgpu::AddressMode::MirrorRepeat:
return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
- case dawn::AddressMode::ClampToEdge:
+ case wgpu::AddressMode::ClampToEdge:
return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
default:
UNREACHABLE();
}
}
- VkFilter VulkanSamplerFilter(dawn::FilterMode filter) {
+ VkFilter VulkanSamplerFilter(wgpu::FilterMode filter) {
switch (filter) {
- case dawn::FilterMode::Linear:
+ case wgpu::FilterMode::Linear:
return VK_FILTER_LINEAR;
- case dawn::FilterMode::Nearest:
+ case wgpu::FilterMode::Nearest:
return VK_FILTER_NEAREST;
default:
UNREACHABLE();
}
}
- VkSamplerMipmapMode VulkanMipMapMode(dawn::FilterMode filter) {
+ VkSamplerMipmapMode VulkanMipMapMode(wgpu::FilterMode filter) {
switch (filter) {
- case dawn::FilterMode::Linear:
+ case wgpu::FilterMode::Linear:
return VK_SAMPLER_MIPMAP_MODE_LINEAR;
- case dawn::FilterMode::Nearest:
+ case wgpu::FilterMode::Nearest:
return VK_SAMPLER_MIPMAP_MODE_NEAREST;
default:
UNREACHABLE();
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h
index 9ea7e0fb924..e7b88747c0e 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SamplerVk.h
@@ -19,10 +19,11 @@
#include "common/vulkan_platform.h"
#include "dawn_native/Error.h"
-#include "dawn_native/vulkan/MemoryAllocator.h"
namespace dawn_native { namespace vulkan {
+ class Device;
+
class Sampler : public SamplerBase {
public:
static ResultOrError<Sampler*> Create(Device* device, const SamplerDescriptor* descriptor);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
index b741d7b901d..96e31270d5d 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.cpp
@@ -25,16 +25,35 @@ namespace dawn_native { namespace vulkan {
// static
ResultOrError<ShaderModule*> ShaderModule::Create(Device* device,
const ShaderModuleDescriptor* descriptor) {
- std::unique_ptr<ShaderModule> module = std::make_unique<ShaderModule>(device, descriptor);
+ std::unique_ptr<ShaderModule> module(new ShaderModule(device, descriptor));
+ if (!module)
+ return DAWN_VALIDATION_ERROR("Unable to create ShaderModule");
DAWN_TRY(module->Initialize(descriptor));
return module.release();
}
+ ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+ : ShaderModuleBase(device, descriptor) {
+ }
+
MaybeError ShaderModule::Initialize(const ShaderModuleDescriptor* descriptor) {
// Use SPIRV-Cross to extract info from the SPIRV even if Vulkan consumes SPIRV. We want to
// have a translation step eventually anyway.
- spirv_cross::Compiler compiler(descriptor->code, descriptor->codeSize);
- ExtractSpirvInfo(compiler);
+ if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
+ shaderc_spvc::CompileOptions options;
+ shaderc_spvc_status status =
+ mSpvcContext.InitializeForGlsl(descriptor->code, descriptor->codeSize, options);
+ if (status != shaderc_spvc_status_success) {
+ return DAWN_VALIDATION_ERROR("Unable to initialize instance of spvc");
+ }
+
+ spirv_cross::Compiler* compiler =
+ reinterpret_cast<spirv_cross::Compiler*>(mSpvcContext.GetCompiler());
+ ExtractSpirvInfo(*compiler);
+ } else {
+ spirv_cross::Compiler compiler(descriptor->code, descriptor->codeSize);
+ ExtractSpirvInfo(compiler);
+ }
VkShaderModuleCreateInfo createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h
index f328dac6b16..01643920b11 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ShaderModuleVk.h
@@ -33,7 +33,7 @@ namespace dawn_native { namespace vulkan {
VkShaderModule GetHandle() const;
private:
- using ShaderModuleBase::ShaderModuleBase;
+ ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
MaybeError Initialize(const ShaderModuleDescriptor* descriptor);
VkShaderModule mHandle = VK_NULL_HANDLE;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp
index 8ae2ccd4757..42623188e3e 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/StagingBufferVk.cpp
@@ -15,7 +15,7 @@
#include "dawn_native/vulkan/StagingBufferVk.h"
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
-#include "dawn_native/vulkan/ResourceMemoryVk.h"
+#include "dawn_native/vulkan/ResourceHeapVk.h"
#include "dawn_native/vulkan/VulkanError.h"
namespace dawn_native { namespace vulkan {
@@ -61,7 +61,7 @@ namespace dawn_native { namespace vulkan {
StagingBuffer::~StagingBuffer() {
mMappedPointer = nullptr;
mDevice->GetFencedDeleter()->DeleteWhenUnused(mBuffer);
- mDevice->DeallocateMemory(mAllocation);
+ mDevice->DeallocateMemory(&mAllocation);
}
VkBuffer StagingBuffer::GetBufferHandle() const {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
index b465bd0a388..52a1f728f09 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
@@ -30,8 +30,8 @@ namespace dawn_native { namespace vulkan {
DawnWSIContextVulkan wsiContext = {};
im.Init(im.userData, &wsiContext);
- ASSERT(im.textureUsage != DAWN_TEXTURE_USAGE_NONE);
- mTextureUsage = static_cast<dawn::TextureUsage>(im.textureUsage);
+ ASSERT(im.textureUsage != WGPUTextureUsage_None);
+ mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
}
SwapChain::~SwapChain() {
@@ -43,7 +43,7 @@ namespace dawn_native { namespace vulkan {
DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
if (error) {
- GetDevice()->HandleError(dawn::ErrorType::Unknown, error);
+ GetDevice()->HandleError(wgpu::ErrorType::Unknown, error);
return nullptr;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
index 339d9da0096..1d8ce43ac72 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.h
@@ -35,7 +35,7 @@ namespace dawn_native { namespace vulkan {
MaybeError OnBeforePresent(TextureBase* texture) override;
private:
- dawn::TextureUsage mTextureUsage;
+ wgpu::TextureUsage mTextureUsage;
};
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
index 0cd4d053ca4..5d119764550 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
@@ -24,6 +24,7 @@
#include "dawn_native/vulkan/CommandRecordingContext.h"
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
+#include "dawn_native/vulkan/ResourceHeapVk.h"
#include "dawn_native/vulkan/StagingBufferVk.h"
#include "dawn_native/vulkan/UtilsVulkan.h"
#include "dawn_native/vulkan/VulkanError.h"
@@ -34,9 +35,9 @@ namespace dawn_native { namespace vulkan {
// Converts an Dawn texture dimension to a Vulkan image type.
// Note that in Vulkan dimensionality is only 1D, 2D, 3D. Arrays and cube maps are expressed
// via the array size and a "cubemap compatible" flag.
- VkImageType VulkanImageType(dawn::TextureDimension dimension) {
+ VkImageType VulkanImageType(wgpu::TextureDimension dimension) {
switch (dimension) {
- case dawn::TextureDimension::e2D:
+ case wgpu::TextureDimension::e2D:
return VK_IMAGE_TYPE_2D;
default:
UNREACHABLE();
@@ -45,15 +46,15 @@ namespace dawn_native { namespace vulkan {
// Converts an Dawn texture dimension to a Vulkan image view type.
// Contrary to image types, image view types include arrayness and cubemapness
- VkImageViewType VulkanImageViewType(dawn::TextureViewDimension dimension) {
+ VkImageViewType VulkanImageViewType(wgpu::TextureViewDimension dimension) {
switch (dimension) {
- case dawn::TextureViewDimension::e2D:
+ case wgpu::TextureViewDimension::e2D:
return VK_IMAGE_VIEW_TYPE_2D;
- case dawn::TextureViewDimension::e2DArray:
+ case wgpu::TextureViewDimension::e2DArray:
return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
- case dawn::TextureViewDimension::Cube:
+ case wgpu::TextureViewDimension::Cube:
return VK_IMAGE_VIEW_TYPE_CUBE;
- case dawn::TextureViewDimension::CubeArray:
+ case wgpu::TextureViewDimension::CubeArray:
return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
default:
UNREACHABLE();
@@ -61,22 +62,22 @@ namespace dawn_native { namespace vulkan {
}
// Computes which vulkan access type could be required for the given Dawn usage.
- VkAccessFlags VulkanAccessFlags(dawn::TextureUsage usage, const Format& format) {
+ VkAccessFlags VulkanAccessFlags(wgpu::TextureUsage usage, const Format& format) {
VkAccessFlags flags = 0;
- if (usage & dawn::TextureUsage::CopySrc) {
+ if (usage & wgpu::TextureUsage::CopySrc) {
flags |= VK_ACCESS_TRANSFER_READ_BIT;
}
- if (usage & dawn::TextureUsage::CopyDst) {
+ if (usage & wgpu::TextureUsage::CopyDst) {
flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
}
- if (usage & dawn::TextureUsage::Sampled) {
+ if (usage & wgpu::TextureUsage::Sampled) {
flags |= VK_ACCESS_SHADER_READ_BIT;
}
- if (usage & dawn::TextureUsage::Storage) {
+ if (usage & wgpu::TextureUsage::Storage) {
flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
}
- if (usage & dawn::TextureUsage::OutputAttachment) {
+ if (usage & wgpu::TextureUsage::OutputAttachment) {
if (format.HasDepthOrStencil()) {
flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
@@ -85,7 +86,7 @@ namespace dawn_native { namespace vulkan {
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
}
}
- if (usage & dawn::TextureUsage::Present) {
+ if (usage & wgpu::TextureUsage::Present) {
// There is no access flag for present because the VK_KHR_SWAPCHAIN extension says
// that vkQueuePresentKHR makes the memory of the image visible to the presentation
// engine. There's also a note explicitly saying dstAccessMask should be 0. On the
@@ -98,38 +99,38 @@ namespace dawn_native { namespace vulkan {
}
// Chooses which Vulkan image layout should be used for the given Dawn usage
- VkImageLayout VulkanImageLayout(dawn::TextureUsage usage, const Format& format) {
- if (usage == dawn::TextureUsage::None) {
+ VkImageLayout VulkanImageLayout(wgpu::TextureUsage usage, const Format& format) {
+ if (usage == wgpu::TextureUsage::None) {
return VK_IMAGE_LAYOUT_UNDEFINED;
}
- if (!dawn::HasZeroOrOneBits(usage)) {
+ if (!wgpu::HasZeroOrOneBits(usage)) {
return VK_IMAGE_LAYOUT_GENERAL;
}
// Usage has a single bit so we can switch on its value directly.
switch (usage) {
- case dawn::TextureUsage::CopyDst:
+ case wgpu::TextureUsage::CopyDst:
return VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
- case dawn::TextureUsage::Sampled:
+ case wgpu::TextureUsage::Sampled:
return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
// Vulkan texture copy functions require the image to be in _one_ known layout.
// Depending on whether parts of the texture have been transitioned to only
// CopySrc or a combination with something else, the texture could be in a
// combination of GENERAL and TRANSFER_SRC_OPTIMAL. This would be a problem, so we
// make CopySrc use GENERAL.
- case dawn::TextureUsage::CopySrc:
+ case wgpu::TextureUsage::CopySrc:
// Writable storage textures must use general. If we could know the texture is read
// only we could use SHADER_READ_ONLY_OPTIMAL
- case dawn::TextureUsage::Storage:
+ case wgpu::TextureUsage::Storage:
return VK_IMAGE_LAYOUT_GENERAL;
- case dawn::TextureUsage::OutputAttachment:
+ case wgpu::TextureUsage::OutputAttachment:
if (format.HasDepthOrStencil()) {
return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
} else {
return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
}
- case dawn::TextureUsage::Present:
+ case wgpu::TextureUsage::Present:
return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
default:
UNREACHABLE();
@@ -137,23 +138,23 @@ namespace dawn_native { namespace vulkan {
}
// Computes which Vulkan pipeline stage can access a texture in the given Dawn usage
- VkPipelineStageFlags VulkanPipelineStage(dawn::TextureUsage usage, const Format& format) {
+ VkPipelineStageFlags VulkanPipelineStage(wgpu::TextureUsage usage, const Format& format) {
VkPipelineStageFlags flags = 0;
- if (usage == dawn::TextureUsage::None) {
+ if (usage == wgpu::TextureUsage::None) {
// This only happens when a texture is initially created (and for srcAccessMask) in
// which case there is no need to wait on anything to stop accessing this texture.
return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}
- if (usage & (dawn::TextureUsage::CopySrc | dawn::TextureUsage::CopyDst)) {
+ if (usage & (wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) {
flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
}
- if (usage & (dawn::TextureUsage::Sampled | dawn::TextureUsage::Storage)) {
+ if (usage & (wgpu::TextureUsage::Sampled | wgpu::TextureUsage::Storage)) {
flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
}
- if (usage & dawn::TextureUsage::OutputAttachment) {
+ if (usage & wgpu::TextureUsage::OutputAttachment) {
if (format.HasDepthOrStencil()) {
flags |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
@@ -163,7 +164,7 @@ namespace dawn_native { namespace vulkan {
flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
}
}
- if (usage & dawn::TextureUsage::Present) {
+ if (usage & wgpu::TextureUsage::Present) {
// There is no pipeline stage for present but a pipeline stage is required so we use
// "bottom of pipe" to block as little as possible and vkQueuePresentKHR will make
// the memory visible to the presentation engine. The spec explicitly mentions that
@@ -199,136 +200,120 @@ namespace dawn_native { namespace vulkan {
return {extent.width, extent.height, extent.depth};
}
- bool IsSampleCountSupported(const dawn_native::vulkan::Device* device,
- const VkImageCreateInfo& imageCreateInfo) {
- ASSERT(device);
-
- VkPhysicalDevice physicalDevice = ToBackend(device->GetAdapter())->GetPhysicalDevice();
- VkImageFormatProperties properties;
- if (device->fn.GetPhysicalDeviceImageFormatProperties(
- physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType,
- imageCreateInfo.tiling, imageCreateInfo.usage, imageCreateInfo.flags,
- &properties) != VK_SUCCESS) {
- UNREACHABLE();
- }
-
- return properties.sampleCounts & imageCreateInfo.samples;
- }
-
} // namespace
// Converts Dawn texture format to Vulkan formats.
- VkFormat VulkanImageFormat(dawn::TextureFormat format) {
+ VkFormat VulkanImageFormat(wgpu::TextureFormat format) {
switch (format) {
- case dawn::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Unorm:
return VK_FORMAT_R8_UNORM;
- case dawn::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R8Snorm:
return VK_FORMAT_R8_SNORM;
- case dawn::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Uint:
return VK_FORMAT_R8_UINT;
- case dawn::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::R8Sint:
return VK_FORMAT_R8_SINT;
- case dawn::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Uint:
return VK_FORMAT_R16_UINT;
- case dawn::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Sint:
return VK_FORMAT_R16_SINT;
- case dawn::TextureFormat::R16Float:
+ case wgpu::TextureFormat::R16Float:
return VK_FORMAT_R16_SFLOAT;
- case dawn::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Unorm:
return VK_FORMAT_R8G8_UNORM;
- case dawn::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RG8Snorm:
return VK_FORMAT_R8G8_SNORM;
- case dawn::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Uint:
return VK_FORMAT_R8G8_UINT;
- case dawn::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::RG8Sint:
return VK_FORMAT_R8G8_SINT;
- case dawn::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Uint:
return VK_FORMAT_R32_UINT;
- case dawn::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::R32Sint:
return VK_FORMAT_R32_SINT;
- case dawn::TextureFormat::R32Float:
+ case wgpu::TextureFormat::R32Float:
return VK_FORMAT_R32_SFLOAT;
- case dawn::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Uint:
return VK_FORMAT_R16G16_UINT;
- case dawn::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Sint:
return VK_FORMAT_R16G16_SINT;
- case dawn::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RG16Float:
return VK_FORMAT_R16G16_SFLOAT;
- case dawn::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8Unorm:
return VK_FORMAT_R8G8B8A8_UNORM;
- case dawn::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
return VK_FORMAT_R8G8B8A8_SRGB;
- case dawn::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Snorm:
return VK_FORMAT_R8G8B8A8_SNORM;
- case dawn::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Uint:
return VK_FORMAT_R8G8B8A8_UINT;
- case dawn::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::RGBA8Sint:
return VK_FORMAT_R8G8B8A8_SINT;
- case dawn::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8Unorm:
return VK_FORMAT_B8G8R8A8_UNORM;
- case dawn::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
return VK_FORMAT_B8G8R8A8_SRGB;
- case dawn::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RGB10A2Unorm:
return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
- case dawn::TextureFormat::RG11B10Float:
+ case wgpu::TextureFormat::RG11B10Float:
return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
- case dawn::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Uint:
return VK_FORMAT_R32G32_UINT;
- case dawn::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RG32Sint:
return VK_FORMAT_R32G32_SINT;
- case dawn::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RG32Float:
return VK_FORMAT_R32G32_SFLOAT;
- case dawn::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Uint:
return VK_FORMAT_R16G16B16A16_UINT;
- case dawn::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Sint:
return VK_FORMAT_R16G16B16A16_SINT;
- case dawn::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA16Float:
return VK_FORMAT_R16G16B16A16_SFLOAT;
- case dawn::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Uint:
return VK_FORMAT_R32G32B32A32_UINT;
- case dawn::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::RGBA32Sint:
return VK_FORMAT_R32G32B32A32_SINT;
- case dawn::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGBA32Float:
return VK_FORMAT_R32G32B32A32_SFLOAT;
- case dawn::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth32Float:
return VK_FORMAT_D32_SFLOAT;
- case dawn::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24Plus:
return VK_FORMAT_D32_SFLOAT;
- case dawn::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
return VK_FORMAT_D32_SFLOAT_S8_UINT;
- case dawn::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnorm:
return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
- case dawn::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
return VK_FORMAT_BC1_RGBA_SRGB_BLOCK;
- case dawn::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
return VK_FORMAT_BC2_UNORM_BLOCK;
- case dawn::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
return VK_FORMAT_BC2_SRGB_BLOCK;
- case dawn::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
return VK_FORMAT_BC3_UNORM_BLOCK;
- case dawn::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
return VK_FORMAT_BC3_SRGB_BLOCK;
- case dawn::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
return VK_FORMAT_BC4_SNORM_BLOCK;
- case dawn::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RUnorm:
return VK_FORMAT_BC4_UNORM_BLOCK;
- case dawn::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
return VK_FORMAT_BC5_SNORM_BLOCK;
- case dawn::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGUnorm:
return VK_FORMAT_BC5_UNORM_BLOCK;
- case dawn::TextureFormat::BC6HRGBSfloat:
+ case wgpu::TextureFormat::BC6HRGBSfloat:
return VK_FORMAT_BC6H_SFLOAT_BLOCK;
- case dawn::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
return VK_FORMAT_BC6H_UFLOAT_BLOCK;
- case dawn::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
return VK_FORMAT_BC7_UNORM_BLOCK;
- case dawn::TextureFormat::BC7RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
return VK_FORMAT_BC7_SRGB_BLOCK;
default:
@@ -338,22 +323,22 @@ namespace dawn_native { namespace vulkan {
// Converts the Dawn usage flags to Vulkan usage flags. Also needs the format to choose
// between color and depth attachment usages.
- VkImageUsageFlags VulkanImageUsage(dawn::TextureUsage usage, const Format& format) {
+ VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format) {
VkImageUsageFlags flags = 0;
- if (usage & dawn::TextureUsage::CopySrc) {
+ if (usage & wgpu::TextureUsage::CopySrc) {
flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
}
- if (usage & dawn::TextureUsage::CopyDst) {
+ if (usage & wgpu::TextureUsage::CopyDst) {
flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
}
- if (usage & dawn::TextureUsage::Sampled) {
+ if (usage & wgpu::TextureUsage::Sampled) {
flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
}
- if (usage & dawn::TextureUsage::Storage) {
+ if (usage & wgpu::TextureUsage::Storage) {
flags |= VK_IMAGE_USAGE_STORAGE_BIT;
}
- if (usage & dawn::TextureUsage::OutputAttachment) {
+ if (usage & wgpu::TextureUsage::OutputAttachment) {
if (format.HasDepthOrStencil()) {
flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
} else {
@@ -377,7 +362,7 @@ namespace dawn_native { namespace vulkan {
MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase*,
const TextureDescriptor* descriptor) {
- if (descriptor->dimension != dawn::TextureDimension::e2D) {
+ if (descriptor->dimension != wgpu::TextureDimension::e2D) {
return DAWN_VALIDATION_ERROR("Texture must be 2D");
}
@@ -396,6 +381,22 @@ namespace dawn_native { namespace vulkan {
return {};
}
+ bool IsSampleCountSupported(const dawn_native::vulkan::Device* device,
+ const VkImageCreateInfo& imageCreateInfo) {
+ ASSERT(device);
+
+ VkPhysicalDevice physicalDevice = ToBackend(device->GetAdapter())->GetPhysicalDevice();
+ VkImageFormatProperties properties;
+ if (device->fn.GetPhysicalDeviceImageFormatProperties(
+ physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType,
+ imageCreateInfo.tiling, imageCreateInfo.usage, imageCreateInfo.flags,
+ &properties) != VK_SUCCESS) {
+ UNREACHABLE();
+ }
+
+ return properties.sampleCounts & imageCreateInfo.samples;
+ }
+
// static
ResultOrError<Texture*> Texture::Create(Device* device, const TextureDescriptor* descriptor) {
std::unique_ptr<Texture> texture =
@@ -405,16 +406,14 @@ namespace dawn_native { namespace vulkan {
}
// static
- ResultOrError<Texture*> Texture::CreateFromExternal(Device* device,
- const ExternalImageDescriptor* descriptor,
- const TextureDescriptor* textureDescriptor,
- VkSemaphore signalSemaphore,
- VkDeviceMemory externalMemoryAllocation,
- std::vector<VkSemaphore> waitSemaphores) {
+ ResultOrError<Texture*> Texture::CreateFromExternal(
+ Device* device,
+ const ExternalImageDescriptor* descriptor,
+ const TextureDescriptor* textureDescriptor,
+ external_memory::Service* externalMemoryService) {
std::unique_ptr<Texture> texture =
std::make_unique<Texture>(device, textureDescriptor, TextureState::OwnedInternal);
- DAWN_TRY(texture->InitializeFromExternal(
- descriptor, signalSemaphore, externalMemoryAllocation, std::move((waitSemaphores))));
+ DAWN_TRY(texture->InitializeFromExternal(descriptor, externalMemoryService));
return texture.release();
}
@@ -460,14 +459,13 @@ namespace dawn_native { namespace vulkan {
VkMemoryRequirements requirements;
device->fn.GetImageMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
- if (!device->GetMemoryAllocator()->Allocate(requirements, false, &mMemoryAllocation)) {
- return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate texture");
- }
+ DAWN_TRY_ASSIGN(mMemoryAllocation, device->AllocateMemory(requirements, false));
- DAWN_TRY(CheckVkSuccess(device->fn.BindImageMemory(device->GetVkDevice(), mHandle,
- mMemoryAllocation.GetMemory(),
- mMemoryAllocation.GetMemoryOffset()),
- "BindImageMemory"));
+ DAWN_TRY(CheckVkSuccess(
+ device->fn.BindImageMemory(device->GetVkDevice(), mHandle,
+ ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
+ mMemoryAllocation.GetOffset()),
+ "BindImageMemory"));
if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
DAWN_TRY(ClearTexture(ToBackend(GetDevice())->GetPendingRecordingContext(), 0,
@@ -485,46 +483,42 @@ namespace dawn_native { namespace vulkan {
// Internally managed, but imported from external handle
MaybeError Texture::InitializeFromExternal(const ExternalImageDescriptor* descriptor,
- VkSemaphore signalSemaphore,
- VkDeviceMemory externalMemoryAllocation,
- std::vector<VkSemaphore> waitSemaphores) {
- mExternalState = ExternalState::PendingAcquire;
- Device* device = ToBackend(GetDevice());
-
- VkImageCreateInfo createInfo = {};
- createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
- createInfo.pNext = nullptr;
- createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;
- createInfo.imageType = VulkanImageType(GetDimension());
- createInfo.format = VulkanImageFormat(GetFormat().format);
- createInfo.extent = VulkanExtent3D(GetSize());
- createInfo.mipLevels = GetNumMipLevels();
- createInfo.arrayLayers = GetArrayLayers();
- createInfo.samples = VulkanSampleCount(GetSampleCount());
- createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
- createInfo.usage = VulkanImageUsage(GetUsage(), GetFormat());
- createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- createInfo.queueFamilyIndexCount = 0;
- createInfo.pQueueFamilyIndices = nullptr;
- createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ external_memory::Service* externalMemoryService) {
+ VkFormat format = VulkanImageFormat(GetFormat().format);
+ VkImageUsageFlags usage = VulkanImageUsage(GetUsage(), GetFormat());
+ if (!externalMemoryService->SupportsCreateImage(descriptor, format, usage)) {
+ return DAWN_VALIDATION_ERROR("Creating an image from external memory is not supported");
+ }
- ASSERT(IsSampleCountSupported(device, createInfo));
+ mExternalState = ExternalState::PendingAcquire;
+ VkImageCreateInfo baseCreateInfo = {};
+ baseCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ baseCreateInfo.pNext = nullptr;
+ baseCreateInfo.imageType = VulkanImageType(GetDimension());
+ baseCreateInfo.format = format;
+ baseCreateInfo.extent = VulkanExtent3D(GetSize());
+ baseCreateInfo.mipLevels = GetNumMipLevels();
+ baseCreateInfo.arrayLayers = GetArrayLayers();
+ baseCreateInfo.samples = VulkanSampleCount(GetSampleCount());
+ baseCreateInfo.usage = usage;
+ baseCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ baseCreateInfo.queueFamilyIndexCount = 0;
+ baseCreateInfo.pQueueFamilyIndices = nullptr;
// We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally beause the Vulkan images
// that are used in vkCmdClearColorImage() must have been created with this flag, which is
// also required for the implementation of robust resource initialization.
- createInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
-
- DAWN_TRY(CheckVkSuccess(
- device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &mHandle),
- "CreateImage"));
-
- // Create the image memory and associate it with the container
- VkMemoryRequirements requirements;
- device->fn.GetImageMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
+ baseCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- ASSERT(requirements.size <= descriptor->allocationSize);
+ DAWN_TRY_ASSIGN(mHandle, externalMemoryService->CreateImage(descriptor, baseCreateInfo));
+ return {};
+ }
+ MaybeError Texture::BindExternalMemory(const ExternalImageDescriptor* descriptor,
+ VkSemaphore signalSemaphore,
+ VkDeviceMemory externalMemoryAllocation,
+ std::vector<VkSemaphore> waitSemaphores) {
+ Device* device = ToBackend(GetDevice());
DAWN_TRY(CheckVkSuccess(
device->fn.BindImageMemory(device->GetVkDevice(), mHandle, externalMemoryAllocation, 0),
"BindImageMemory (external)"));
@@ -538,7 +532,6 @@ namespace dawn_native { namespace vulkan {
mExternalAllocation = externalMemoryAllocation;
mSignalSemaphore = signalSemaphore;
mWaitRequirements = std::move(waitSemaphores);
-
return {};
}
@@ -558,7 +551,7 @@ namespace dawn_native { namespace vulkan {
// Release the texture
mExternalState = ExternalState::PendingRelease;
- TransitionUsageNow(device->GetPendingRecordingContext(), dawn::TextureUsage::None);
+ TransitionUsageNow(device->GetPendingRecordingContext(), wgpu::TextureUsage::None);
// Queue submit to signal we are done with the texture
device->GetPendingRecordingContext()->signalSemaphores.push_back(mSignalSemaphore);
@@ -578,28 +571,26 @@ namespace dawn_native { namespace vulkan {
}
void Texture::DestroyImpl() {
- Device* device = ToBackend(GetDevice());
+ if (GetTextureState() == TextureState::OwnedInternal) {
+ Device* device = ToBackend(GetDevice());
- // If we own the resource, release it.
- if (mMemoryAllocation.GetMemory() != VK_NULL_HANDLE) {
- // We need to free both the memory allocation and the container. Memory should be
- // freed after the VkImage is destroyed and this is taken care of by the
- // FencedDeleter.
- device->GetMemoryAllocator()->Free(&mMemoryAllocation);
- }
+ // For textures created from a VkImage, the allocation if kInvalid so the Device knows
+ // to skip the deallocation of the (absence of) VkDeviceMemory.
+ device->DeallocateMemory(&mMemoryAllocation);
- if (mHandle != VK_NULL_HANDLE) {
- device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
- }
+ if (mHandle != VK_NULL_HANDLE) {
+ device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ }
- if (mExternalAllocation != VK_NULL_HANDLE) {
- device->GetFencedDeleter()->DeleteWhenUnused(mExternalAllocation);
- }
+ if (mExternalAllocation != VK_NULL_HANDLE) {
+ device->GetFencedDeleter()->DeleteWhenUnused(mExternalAllocation);
+ }
- mHandle = VK_NULL_HANDLE;
- mExternalAllocation = VK_NULL_HANDLE;
- // If a signal semaphore exists it should be requested before we delete the texture
- ASSERT(mSignalSemaphore == VK_NULL_HANDLE);
+ mHandle = VK_NULL_HANDLE;
+ mExternalAllocation = VK_NULL_HANDLE;
+ // If a signal semaphore exists it should be requested before we delete the texture
+ ASSERT(mSignalSemaphore == VK_NULL_HANDLE);
+ }
}
VkImage Texture::GetHandle() const {
@@ -611,7 +602,7 @@ namespace dawn_native { namespace vulkan {
}
void Texture::TransitionUsageNow(CommandRecordingContext* recordingContext,
- dawn::TextureUsage usage) {
+ wgpu::TextureUsage usage) {
// Avoid encoding barriers when it isn't needed.
bool lastReadOnly = (mLastUsage & kReadOnlyTextureUsages) == mLastUsage;
if (lastReadOnly && mLastUsage == usage && mLastExternalState == mExternalState) {
@@ -632,7 +623,7 @@ namespace dawn_native { namespace vulkan {
barrier.newLayout = VulkanImageLayout(usage, format);
barrier.image = mHandle;
// This transitions the whole resource but assumes it is a 2D texture
- ASSERT(GetDimension() == dawn::TextureDimension::e2D);
+ ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
barrier.subresourceRange.aspectMask = VulkanAspectMask(format);
barrier.subresourceRange.baseMipLevel = 0;
barrier.subresourceRange.levelCount = GetNumMipLevels();
@@ -686,7 +677,7 @@ namespace dawn_native { namespace vulkan {
range.layerCount = layerCount;
uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
- TransitionUsageNow(recordingContext, dawn::TextureUsage::CopyDst);
+ TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst);
if (GetFormat().isRenderable) {
if (GetFormat().HasDepthOrStencil()) {
VkClearDepthStencilValue clearDepthStencilValue[1];
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
index dd1d5f5393a..82366ae0ef0 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
@@ -18,36 +18,39 @@
#include "dawn_native/Texture.h"
#include "common/vulkan_platform.h"
+#include "dawn_native/ResourceMemoryAllocation.h"
#include "dawn_native/vulkan/ExternalHandle.h"
-#include "dawn_native/vulkan/MemoryAllocator.h"
+#include "dawn_native/vulkan/external_memory/MemoryService.h"
namespace dawn_native { namespace vulkan {
struct CommandRecordingContext;
+ class Device;
struct ExternalImageDescriptor;
- VkFormat VulkanImageFormat(dawn::TextureFormat format);
- VkImageUsageFlags VulkanImageUsage(dawn::TextureUsage usage, const Format& format);
+ VkFormat VulkanImageFormat(wgpu::TextureFormat format);
+ VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format);
VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount);
MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase* device,
const TextureDescriptor* descriptor);
+ bool IsSampleCountSupported(const dawn_native::vulkan::Device* device,
+ const VkImageCreateInfo& imageCreateInfo);
+
class Texture : public TextureBase {
public:
// Used to create a regular texture from a descriptor.
static ResultOrError<Texture*> Create(Device* device, const TextureDescriptor* descriptor);
- // Used to create a texture from Vulkan external memory objects.
- // Ownership of semaphores and the memory allocation is taken only if the creation is
- // a success.
+ // Creates a texture and initializes it with a VkImage that references an external memory
+ // object. Before the texture can be used, the VkDeviceMemory associated with the external
+ // image must be bound via Texture::BindExternalMemory.
static ResultOrError<Texture*> CreateFromExternal(
Device* device,
const ExternalImageDescriptor* descriptor,
const TextureDescriptor* textureDescriptor,
- VkSemaphore signalSemaphore,
- VkDeviceMemory externalMemoryAllocation,
- std::vector<VkSemaphore> waitSemaphores);
+ external_memory::Service* externalMemoryService);
Texture(Device* device, const TextureDescriptor* descriptor, VkImage nativeImage);
~Texture();
@@ -59,7 +62,7 @@ namespace dawn_native { namespace vulkan {
// `commands`.
// TODO(cwallez@chromium.org): coalesce barriers and do them early when possible.
void TransitionUsageNow(CommandRecordingContext* recordingContext,
- dawn::TextureUsage usage);
+ wgpu::TextureUsage usage);
void EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
uint32_t baseMipLevel,
uint32_t levelCount,
@@ -67,14 +70,19 @@ namespace dawn_native { namespace vulkan {
uint32_t layerCount);
MaybeError SignalAndDestroy(VkSemaphore* outSignalSemaphore);
+ // Binds externally allocated memory to the VkImage and on success, takes ownership of
+ // semaphores.
+ MaybeError BindExternalMemory(const ExternalImageDescriptor* descriptor,
+ VkSemaphore signalSemaphore,
+ VkDeviceMemory externalMemoryAllocation,
+ std::vector<VkSemaphore> waitSemaphores);
private:
using TextureBase::TextureBase;
MaybeError InitializeAsInternalTexture();
+
MaybeError InitializeFromExternal(const ExternalImageDescriptor* descriptor,
- VkSemaphore signalSemaphore,
- VkDeviceMemory externalMemoryAllocation,
- std::vector<VkSemaphore> waitSemaphores);
+ external_memory::Service* externalMemoryService);
void DestroyImpl() override;
MaybeError ClearTexture(CommandRecordingContext* recordingContext,
@@ -85,7 +93,7 @@ namespace dawn_native { namespace vulkan {
TextureBase::ClearValue);
VkImage mHandle = VK_NULL_HANDLE;
- DeviceMemoryAllocation mMemoryAllocation;
+ ResourceMemoryAllocation mMemoryAllocation;
VkDeviceMemory mExternalAllocation = VK_NULL_HANDLE;
enum class ExternalState {
@@ -103,7 +111,7 @@ namespace dawn_native { namespace vulkan {
// A usage of none will make sure the texture is transitioned before its first use as
// required by the Vulkan spec.
- dawn::TextureUsage mLastUsage = dawn::TextureUsage::None;
+ wgpu::TextureUsage mLastUsage = wgpu::TextureUsage::None;
};
class TextureView : public TextureViewBase {
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
index dd81f3fb5ce..790336ba7dc 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
@@ -21,23 +21,23 @@
namespace dawn_native { namespace vulkan {
- VkCompareOp ToVulkanCompareOp(dawn::CompareFunction op) {
+ VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op) {
switch (op) {
- case dawn::CompareFunction::Always:
+ case wgpu::CompareFunction::Always:
return VK_COMPARE_OP_ALWAYS;
- case dawn::CompareFunction::Equal:
+ case wgpu::CompareFunction::Equal:
return VK_COMPARE_OP_EQUAL;
- case dawn::CompareFunction::Greater:
+ case wgpu::CompareFunction::Greater:
return VK_COMPARE_OP_GREATER;
- case dawn::CompareFunction::GreaterEqual:
+ case wgpu::CompareFunction::GreaterEqual:
return VK_COMPARE_OP_GREATER_OR_EQUAL;
- case dawn::CompareFunction::Less:
+ case wgpu::CompareFunction::Less:
return VK_COMPARE_OP_LESS;
- case dawn::CompareFunction::LessEqual:
+ case wgpu::CompareFunction::LessEqual:
return VK_COMPARE_OP_LESS_OR_EQUAL;
- case dawn::CompareFunction::Never:
+ case wgpu::CompareFunction::Never:
return VK_COMPARE_OP_NEVER;
- case dawn::CompareFunction::NotEqual:
+ case wgpu::CompareFunction::NotEqual:
return VK_COMPARE_OP_NOT_EQUAL;
default:
UNREACHABLE();
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h
index 8f4b5acee94..02ef6d3737b 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h
@@ -21,7 +21,7 @@
namespace dawn_native { namespace vulkan {
- VkCompareOp ToVulkanCompareOp(dawn::CompareFunction op);
+ VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op);
Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize);
VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp
index 60eda449326..ad99a27401d 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanBackend.cpp
@@ -28,43 +28,55 @@
namespace dawn_native { namespace vulkan {
- VkInstance GetInstance(DawnDevice device) {
+ VkInstance GetInstance(WGPUDevice device) {
Device* backendDevice = reinterpret_cast<Device*>(device);
return backendDevice->GetVkInstance();
}
+ DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device,
+ const char* pName) {
+ Device* backendDevice = reinterpret_cast<Device*>(device);
+ return (*backendDevice->fn.GetInstanceProcAddr)(backendDevice->GetVkInstance(), pName);
+ }
+
// Explicitly export this function because it uses the "native" type for surfaces while the
// header as seen in this file uses the wrapped type.
DAWN_NATIVE_EXPORT DawnSwapChainImplementation
- CreateNativeSwapChainImpl(DawnDevice device, VkSurfaceKHRNative surfaceNative) {
+ CreateNativeSwapChainImpl(WGPUDevice device, VkSurfaceKHRNative surfaceNative) {
Device* backendDevice = reinterpret_cast<Device*>(device);
VkSurfaceKHR surface = VkSurfaceKHR::CreateFromHandle(surfaceNative);
DawnSwapChainImplementation impl;
impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, surface));
- impl.textureUsage = DAWN_TEXTURE_USAGE_PRESENT;
+ impl.textureUsage = WGPUTextureUsage_Present;
return impl;
}
- DawnTextureFormat GetNativeSwapChainPreferredFormat(
+ WGPUTextureFormat GetNativeSwapChainPreferredFormat(
const DawnSwapChainImplementation* swapChain) {
NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
- return static_cast<DawnTextureFormat>(impl->GetPreferredFormat());
+ return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
}
#ifdef DAWN_PLATFORM_LINUX
- DawnTexture WrapVulkanImageOpaqueFD(DawnDevice cDevice,
- const ExternalImageDescriptorOpaqueFD* descriptor) {
- Device* device = reinterpret_cast<Device*>(cDevice);
+ ExternalImageDescriptor::ExternalImageDescriptor(ExternalImageDescriptorType type)
+ : type(type) {
+ }
- TextureBase* texture = device->CreateTextureWrappingVulkanImage(
- descriptor, descriptor->memoryFD, descriptor->waitFDs);
+ ExternalImageDescriptorFD::ExternalImageDescriptorFD(ExternalImageDescriptorType type)
+ : ExternalImageDescriptor(type) {
+ }
- return reinterpret_cast<DawnTexture>(texture);
+ ExternalImageDescriptorOpaqueFD::ExternalImageDescriptorOpaqueFD()
+ : ExternalImageDescriptorFD(ExternalImageDescriptorType::OpaqueFD) {
}
- int ExportSignalSemaphoreOpaqueFD(DawnDevice cDevice, DawnTexture cTexture) {
+ ExternalImageDescriptorDmaBuf::ExternalImageDescriptorDmaBuf()
+ : ExternalImageDescriptorFD(ExternalImageDescriptorType::DmaBuf) {
+ }
+
+ int ExportSignalSemaphoreOpaqueFD(WGPUDevice cDevice, WGPUTexture cTexture) {
Device* device = reinterpret_cast<Device*>(cDevice);
Texture* texture = reinterpret_cast<Texture*>(cTexture);
@@ -79,6 +91,23 @@ namespace dawn_native { namespace vulkan {
return outHandle;
}
+
+ WGPUTexture WrapVulkanImage(WGPUDevice cDevice, const ExternalImageDescriptor* descriptor) {
+ Device* device = reinterpret_cast<Device*>(cDevice);
+
+ switch (descriptor->type) {
+ case ExternalImageDescriptorType::OpaqueFD:
+ case ExternalImageDescriptorType::DmaBuf: {
+ const ExternalImageDescriptorFD* fdDescriptor =
+ static_cast<const ExternalImageDescriptorFD*>(descriptor);
+ TextureBase* texture = device->CreateTextureWrappingVulkanImage(
+ descriptor, fdDescriptor->memoryFD, fdDescriptor->waitFDs);
+ return reinterpret_cast<WGPUTexture>(texture);
+ }
+ default:
+ return nullptr;
+ }
+ }
#endif
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp
index d3bbe67d8e5..5cec83a2b99 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.cpp
@@ -119,7 +119,7 @@ namespace dawn_native { namespace vulkan {
}
MaybeError VulkanFunctions::LoadDeviceProcs(VkDevice device,
- const VulkanDeviceKnobs& usedKnobs) {
+ const VulkanDeviceInfo& deviceInfo) {
GET_DEVICE_PROC(AllocateCommandBuffers);
GET_DEVICE_PROC(AllocateDescriptorSets);
GET_DEVICE_PROC(AllocateMemory);
@@ -240,35 +240,35 @@ namespace dawn_native { namespace vulkan {
GET_DEVICE_PROC(UpdateDescriptorSets);
GET_DEVICE_PROC(WaitForFences);
- if (usedKnobs.debugMarker) {
+ if (deviceInfo.debugMarker) {
GET_DEVICE_PROC(CmdDebugMarkerBeginEXT);
GET_DEVICE_PROC(CmdDebugMarkerEndEXT);
GET_DEVICE_PROC(CmdDebugMarkerInsertEXT);
}
- if (usedKnobs.externalMemoryFD) {
+ if (deviceInfo.externalMemoryFD) {
GET_DEVICE_PROC(GetMemoryFdKHR);
GET_DEVICE_PROC(GetMemoryFdPropertiesKHR);
}
- if (usedKnobs.externalSemaphoreFD) {
+ if (deviceInfo.externalSemaphoreFD) {
GET_DEVICE_PROC(ImportSemaphoreFdKHR);
GET_DEVICE_PROC(GetSemaphoreFdKHR);
}
#if VK_USE_PLATFORM_FUCHSIA
- if (usedKnobs.externalMemoryZirconHandle) {
+ if (deviceInfo.externalMemoryZirconHandle) {
GET_DEVICE_PROC(GetMemoryZirconHandleFUCHSIA);
GET_DEVICE_PROC(GetMemoryZirconHandlePropertiesFUCHSIA);
}
- if (usedKnobs.externalSemaphoreZirconHandle) {
+ if (deviceInfo.externalSemaphoreZirconHandle) {
GET_DEVICE_PROC(ImportSemaphoreZirconHandleFUCHSIA);
GET_DEVICE_PROC(GetSemaphoreZirconHandleFUCHSIA);
}
#endif
- if (usedKnobs.swapchain) {
+ if (deviceInfo.swapchain) {
GET_DEVICE_PROC(CreateSwapchainKHR);
GET_DEVICE_PROC(DestroySwapchainKHR);
GET_DEVICE_PROC(GetSwapchainImagesKHR);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h
index 28e4096cbe3..eb5a4725d72 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanFunctions.h
@@ -24,14 +24,14 @@ class DynamicLib;
namespace dawn_native { namespace vulkan {
struct VulkanGlobalInfo;
- struct VulkanDeviceKnobs;
+ struct VulkanDeviceInfo;
// Stores the Vulkan entry points. Also loads them from the dynamic library
// and the vkGet*ProcAddress entry points.
struct VulkanFunctions {
MaybeError LoadGlobalProcs(const DynamicLib& vulkanLib);
MaybeError LoadInstanceProcs(VkInstance instance, const VulkanGlobalInfo& globalInfo);
- MaybeError LoadDeviceProcs(VkDevice device, const VulkanDeviceKnobs& usedKnobs);
+ MaybeError LoadDeviceProcs(VkDevice device, const VulkanDeviceInfo& deviceInfo);
// ---------- Global procs
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp
index 18cefb272c9..ecaa20e7063 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.cpp
@@ -59,6 +59,8 @@ namespace dawn_native { namespace vulkan {
const char kExtensionNameKhrExternalMemoryCapabilities[] =
"VK_KHR_external_memory_capabilities";
const char kExtensionNameKhrExternalMemoryFD[] = "VK_KHR_external_memory_fd";
+ const char kExtensionNameExtExternalMemoryDmaBuf[] = "VK_EXT_external_memory_dma_buf";
+ const char kExtensionNameExtImageDrmFormatModifier[] = "VK_EXT_image_drm_format_modifier";
const char kExtensionNameFuchsiaExternalMemory[] = "VK_FUCHSIA_external_memory";
const char kExtensionNameKhrExternalSemaphore[] = "VK_KHR_external_semaphore";
const char kExtensionNameKhrExternalSemaphoreCapabilities[] =
@@ -287,6 +289,12 @@ namespace dawn_native { namespace vulkan {
if (IsExtensionName(extension, kExtensionNameKhrExternalMemoryFD)) {
info.externalMemoryFD = true;
}
+ if (IsExtensionName(extension, kExtensionNameExtExternalMemoryDmaBuf)) {
+ info.externalMemoryDmaBuf = true;
+ }
+ if (IsExtensionName(extension, kExtensionNameExtImageDrmFormatModifier)) {
+ info.imageDrmFormatModifier = true;
+ }
if (IsExtensionName(extension, kExtensionNameFuchsiaExternalMemory)) {
info.externalMemoryZirconHandle = true;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h
index 2da3466ba21..ac64af0b4fe 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/VulkanInfo.h
@@ -36,6 +36,8 @@ namespace dawn_native { namespace vulkan {
extern const char kExtensionNameKhrExternalMemory[];
extern const char kExtensionNameKhrExternalMemoryCapabilities[];
extern const char kExtensionNameKhrExternalMemoryFD[];
+ extern const char kExtensionNameExtExternalMemoryDmaBuf[];
+ extern const char kExtensionNameExtImageDrmFormatModifier[];
extern const char kExtensionNameFuchsiaExternalMemory[];
extern const char kExtensionNameKhrExternalSemaphore[];
extern const char kExtensionNameKhrExternalSemaphoreCapabilities[];
@@ -88,6 +90,8 @@ namespace dawn_native { namespace vulkan {
bool debugMarker = false;
bool externalMemory = false;
bool externalMemoryFD = false;
+ bool externalMemoryDmaBuf = false;
+ bool imageDrmFormatModifier = false;
bool externalMemoryZirconHandle = false;
bool externalSemaphore = false;
bool externalSemaphoreFD = false;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryService.h b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryService.h
index e49d3ff8a2b..0c4b64d49ad 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryService.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryService.h
@@ -17,6 +17,7 @@
#include "common/vulkan_platform.h"
#include "dawn_native/Error.h"
+#include "dawn_native/VulkanBackend.h"
#include "dawn_native/vulkan/ExternalHandle.h"
namespace dawn_native { namespace vulkan {
@@ -25,22 +26,41 @@ namespace dawn_native { namespace vulkan {
namespace dawn_native { namespace vulkan { namespace external_memory {
+ struct MemoryImportParams {
+ VkDeviceSize allocationSize;
+ uint32_t memoryTypeIndex;
+ };
+
class Service {
public:
explicit Service(Device* device);
~Service();
- // True if the device reports it supports this feature
- bool Supported(VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags);
+ // True if the device reports it supports importing external memory.
+ bool SupportsImportMemory(VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags);
+
+ // True if the device reports it supports creating VkImages from external memory.
+ bool SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+ VkFormat format,
+ VkImageUsageFlags usage);
+
+ // Returns the parameters required for importing memory
+ ResultOrError<MemoryImportParams> GetMemoryImportParams(
+ const ExternalImageDescriptor* descriptor,
+ VkImage image);
// Given an external handle pointing to memory, import it into a VkDeviceMemory
ResultOrError<VkDeviceMemory> ImportMemory(ExternalMemoryHandle handle,
- VkDeviceSize allocationSize,
- uint32_t memoryTypeIndex);
+ const MemoryImportParams& importParams,
+ VkImage image);
+
+ // Create a VkImage for the given handle type
+ ResultOrError<VkImage> CreateImage(const ExternalImageDescriptor* descriptor,
+ const VkImageCreateInfo& baseCreateInfo);
private:
Device* mDevice = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
new file mode 100644
index 00000000000..e9944f8a4af
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
@@ -0,0 +1,271 @@
+// Copyright 2019 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "common/Assert.h"
+#include "dawn_native/vulkan/AdapterVk.h"
+#include "dawn_native/vulkan/BackendVk.h"
+#include "dawn_native/vulkan/DeviceVk.h"
+#include "dawn_native/vulkan/VulkanError.h"
+#include "dawn_native/vulkan/external_memory/MemoryService.h"
+
+namespace dawn_native { namespace vulkan { namespace external_memory {
+
+ namespace {
+
+ // Some modifiers use multiple planes (for example, see the comment for
+ // I915_FORMAT_MOD_Y_TILED_CCS in drm/drm_fourcc.h), but dma-buf import in Dawn only
+ // supports single-plane formats.
+ ResultOrError<uint32_t> GetModifierPlaneCount(const VulkanFunctions& fn,
+ VkPhysicalDevice physicalDevice,
+ VkFormat format,
+ uint64_t modifier) {
+ VkDrmFormatModifierPropertiesListEXT formatModifierPropsList;
+ formatModifierPropsList.sType =
+ VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT;
+ formatModifierPropsList.pNext = nullptr;
+ formatModifierPropsList.drmFormatModifierCount = 0;
+ formatModifierPropsList.pDrmFormatModifierProperties = nullptr;
+
+ VkFormatProperties2 formatProps;
+ formatProps.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
+ formatProps.pNext = &formatModifierPropsList;
+
+ fn.GetPhysicalDeviceFormatProperties2KHR(physicalDevice, format, &formatProps);
+
+ uint32_t modifierCount = formatModifierPropsList.drmFormatModifierCount;
+ std::vector<VkDrmFormatModifierPropertiesEXT> formatModifierProps(modifierCount);
+ formatModifierPropsList.pDrmFormatModifierProperties = formatModifierProps.data();
+
+ fn.GetPhysicalDeviceFormatProperties2KHR(physicalDevice, format, &formatProps);
+ for (const auto& props : formatModifierProps) {
+ if (props.drmFormatModifier == modifier) {
+ uint32_t count = props.drmFormatModifierPlaneCount;
+ return count;
+ }
+ }
+ return DAWN_VALIDATION_ERROR("DRM format modifier not supported");
+ }
+
+ } // anonymous namespace
+
+ Service::Service(Device* device) : mDevice(device) {
+ const VulkanDeviceInfo& deviceInfo = mDevice->GetDeviceInfo();
+ const VulkanGlobalInfo& globalInfo =
+ ToBackend(mDevice->GetAdapter())->GetBackend()->GetGlobalInfo();
+
+ mSupported = globalInfo.getPhysicalDeviceProperties2 &&
+ globalInfo.externalMemoryCapabilities && deviceInfo.externalMemory &&
+ deviceInfo.externalMemoryFD && deviceInfo.externalMemoryDmaBuf &&
+ deviceInfo.imageDrmFormatModifier;
+ }
+
+ Service::~Service() = default;
+
+ bool Service::SupportsImportMemory(VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags) {
+ return mSupported;
+ }
+
+ bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+ VkFormat format,
+ VkImageUsageFlags usage) {
+ // Early out before we try using extension functions
+ if (!mSupported) {
+ return false;
+ }
+ if (descriptor->type != ExternalImageDescriptorType::DmaBuf) {
+ return false;
+ }
+ const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+ static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+
+ // Verify plane count for the modifier.
+ VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
+ uint32_t planeCount = 0;
+ if (mDevice->ConsumedError(GetModifierPlaneCount(mDevice->fn, physicalDevice, format,
+ dmaBufDescriptor->drmModifier),
+ &planeCount)) {
+ return false;
+ }
+ if (planeCount == 0) {
+ return false;
+ }
+ // TODO(hob): Support multi-plane formats like I915_FORMAT_MOD_Y_TILED_CCS.
+ if (planeCount > 1) {
+ return false;
+ }
+
+ // Verify that the format modifier of the external memory and the requested Vulkan format
+ // are actually supported together in a dma-buf import.
+ VkPhysicalDeviceImageDrmFormatModifierInfoEXT drmModifierInfo;
+ drmModifierInfo.sType =
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT;
+ drmModifierInfo.pNext = nullptr;
+ drmModifierInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
+ drmModifierInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+
+ VkPhysicalDeviceExternalImageFormatInfo externalImageFormatInfo;
+ externalImageFormatInfo.sType =
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO;
+ externalImageFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+ externalImageFormatInfo.pNext = &drmModifierInfo;
+
+ VkPhysicalDeviceImageFormatInfo2 imageFormatInfo;
+ imageFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
+ imageFormatInfo.format = format;
+ imageFormatInfo.type = VK_IMAGE_TYPE_2D;
+ imageFormatInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+ imageFormatInfo.usage = usage;
+ imageFormatInfo.flags = 0;
+ imageFormatInfo.pNext = &externalImageFormatInfo;
+
+ VkExternalImageFormatProperties externalImageFormatProps;
+ externalImageFormatProps.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES;
+ externalImageFormatProps.pNext = nullptr;
+
+ VkImageFormatProperties2 imageFormatProps;
+ imageFormatProps.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
+ imageFormatProps.pNext = &externalImageFormatProps;
+
+ VkResult result = mDevice->fn.GetPhysicalDeviceImageFormatProperties2KHR(
+ physicalDevice, &imageFormatInfo, &imageFormatProps);
+ if (result != VK_SUCCESS) {
+ return false;
+ }
+ VkExternalMemoryFeatureFlags featureFlags =
+ externalImageFormatProps.externalMemoryProperties.externalMemoryFeatures;
+ return featureFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT;
+ }
+
+ ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+ const ExternalImageDescriptor* descriptor,
+ VkImage image) {
+ if (descriptor->type != ExternalImageDescriptorType::DmaBuf) {
+ return DAWN_VALIDATION_ERROR("ExternalImageDescriptor is not a dma-buf descriptor");
+ }
+ const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+ static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+ VkDevice device = mDevice->GetVkDevice();
+
+ // Get the valid memory types for the VkImage.
+ VkMemoryRequirements memoryRequirements;
+ mDevice->fn.GetImageMemoryRequirements(device, image, &memoryRequirements);
+
+ VkMemoryFdPropertiesKHR fdProperties;
+ fdProperties.sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR;
+ fdProperties.pNext = nullptr;
+
+ // Get the valid memory types that the external memory can be imported as.
+ mDevice->fn.GetMemoryFdPropertiesKHR(device, VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+ dmaBufDescriptor->memoryFD, &fdProperties);
+ // Choose the best memory type that satisfies both the image's constraint and the import's
+ // constraint.
+ memoryRequirements.memoryTypeBits &= fdProperties.memoryTypeBits;
+ int memoryTypeIndex =
+ mDevice->FindBestMemoryTypeIndex(memoryRequirements, false /** mappable */);
+ if (memoryTypeIndex == -1) {
+ return DAWN_VALIDATION_ERROR("Unable to find appropriate memory type for import");
+ }
+ MemoryImportParams params = {memoryRequirements.size, memoryTypeIndex};
+ return params;
+ }
+
+ ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+ const MemoryImportParams& importParams,
+ VkImage image) {
+ if (handle < 0) {
+ return DAWN_VALIDATION_ERROR("Trying to import memory with invalid handle");
+ }
+
+ VkMemoryDedicatedAllocateInfo memoryDedicatedAllocateInfo;
+ memoryDedicatedAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
+ memoryDedicatedAllocateInfo.pNext = nullptr;
+ memoryDedicatedAllocateInfo.image = image;
+ memoryDedicatedAllocateInfo.buffer = VK_NULL_HANDLE;
+
+ VkImportMemoryFdInfoKHR importMemoryFdInfo;
+ importMemoryFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
+ importMemoryFdInfo.pNext = &memoryDedicatedAllocateInfo;
+ importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+ importMemoryFdInfo.fd = handle;
+
+ VkMemoryAllocateInfo memoryAllocateInfo;
+ memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ memoryAllocateInfo.pNext = &importMemoryFdInfo;
+ memoryAllocateInfo.allocationSize = importParams.allocationSize;
+ memoryAllocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
+
+ VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+ DAWN_TRY(
+ CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &memoryAllocateInfo,
+ nullptr, &allocatedMemory),
+ "vkAllocateMemory"));
+ return allocatedMemory;
+ }
+
+ ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+ const VkImageCreateInfo& baseCreateInfo) {
+ if (descriptor->type != ExternalImageDescriptorType::DmaBuf) {
+ return DAWN_VALIDATION_ERROR("ExternalImageDescriptor is not a dma-buf descriptor");
+ }
+ const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+ static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+ VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
+ VkDevice device = mDevice->GetVkDevice();
+
+ // Dawn currently doesn't support multi-plane formats, so we only need to create a single
+ // VkSubresourceLayout here.
+ VkSubresourceLayout planeLayout;
+ planeLayout.offset = 0;
+ planeLayout.size = 0; // VK_EXT_image_drm_format_modifier mandates size = 0.
+ planeLayout.rowPitch = dmaBufDescriptor->stride;
+ planeLayout.arrayPitch = 0; // Not an array texture
+ planeLayout.depthPitch = 0; // Not a depth texture
+
+ uint32_t planeCount;
+ DAWN_TRY_ASSIGN(planeCount,
+ GetModifierPlaneCount(mDevice->fn, physicalDevice, baseCreateInfo.format,
+ dmaBufDescriptor->drmModifier));
+ ASSERT(planeCount == 1);
+
+ VkImageDrmFormatModifierExplicitCreateInfoEXT explicitCreateInfo;
+ explicitCreateInfo.sType =
+ VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT;
+ explicitCreateInfo.pNext = NULL;
+ explicitCreateInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
+ explicitCreateInfo.drmFormatModifierPlaneCount = planeCount;
+ explicitCreateInfo.pPlaneLayouts = &planeLayout;
+
+ VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
+ externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
+ externalMemoryImageCreateInfo.pNext = &explicitCreateInfo;
+ externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+
+ VkImageCreateInfo createInfo = baseCreateInfo;
+ createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ createInfo.pNext = &externalMemoryImageCreateInfo;
+ createInfo.flags = 0;
+ createInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+
+ // Create a new VkImage with tiling equal to the DRM format modifier.
+ VkImage image;
+ DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateImage(device, &createInfo, nullptr, &image),
+ "CreateImage"));
+ return image;
+ }
+
+}}} // namespace dawn_native::vulkan::external_memory
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceNull.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceNull.cpp
index 7804ad3623a..14d882a56ab 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceNull.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceNull.cpp
@@ -24,18 +24,35 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
Service::~Service() = default;
- bool Service::Supported(VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags) {
+ bool Service::SupportsImportMemory(VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags) {
return false;
}
+ bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+ VkFormat format,
+ VkImageUsageFlags usage) {
+ return false;
+ }
+
+ ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+ const ExternalImageDescriptor* descriptor,
+ VkImage image) {
+ return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
+ }
+
ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
- VkDeviceSize allocationSize,
- uint32_t memoryTypeIndex) {
- return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
+ const MemoryImportParams& importParams,
+ VkImage image) {
+ return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
+ }
+
+ ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+ const VkImageCreateInfo& baseCreateInfo) {
+ return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
}
}}} // namespace dawn_native::vulkan::external_memory
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
index d6e0e5a0fa7..2a31b311874 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
@@ -12,9 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include "common/Assert.h"
#include "dawn_native/vulkan/AdapterVk.h"
#include "dawn_native/vulkan/BackendVk.h"
#include "dawn_native/vulkan/DeviceVk.h"
+#include "dawn_native/vulkan/TextureVk.h"
#include "dawn_native/vulkan/VulkanError.h"
#include "dawn_native/vulkan/external_memory/MemoryService.h"
@@ -32,11 +34,11 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
Service::~Service() = default;
- bool Service::Supported(VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags) {
+ bool Service::SupportsImportMemory(VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags) {
// Early out before we try using extension functions
if (!mSupported) {
return false;
@@ -79,13 +81,39 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
!(memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR);
}
+ bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+ VkFormat format,
+ VkImageUsageFlags usage) {
+ return mSupported;
+ }
+
+ ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+ const ExternalImageDescriptor* descriptor,
+ VkImage image) {
+ if (descriptor->type != ExternalImageDescriptorType::OpaqueFD) {
+ return DAWN_VALIDATION_ERROR("ExternalImageDescriptor is not an OpaqueFD descriptor");
+ }
+ const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
+ static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
+
+ MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
+ opaqueFDDescriptor->memoryTypeIndex};
+ return params;
+ }
+
ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
- VkDeviceSize allocationSize,
- uint32_t memoryTypeIndex) {
+ const MemoryImportParams& importParams,
+ VkImage image) {
if (handle < 0) {
return DAWN_VALIDATION_ERROR("Trying to import memory with invalid handle");
}
+ VkMemoryRequirements requirements;
+ mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
+ if (requirements.size > importParams.allocationSize) {
+ return DAWN_VALIDATION_ERROR("Requested allocation size is too small for image");
+ }
+
VkImportMemoryFdInfoKHR importMemoryFdInfo;
importMemoryFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
importMemoryFdInfo.pNext = nullptr;
@@ -95,8 +123,8 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
VkMemoryAllocateInfo allocateInfo;
allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocateInfo.pNext = &importMemoryFdInfo;
- allocateInfo.allocationSize = allocationSize;
- allocateInfo.memoryTypeIndex = memoryTypeIndex;
+ allocateInfo.allocationSize = importParams.allocationSize;
+ allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
@@ -105,4 +133,20 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
return allocatedMemory;
}
+ ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+ const VkImageCreateInfo& baseCreateInfo) {
+ VkImageCreateInfo createInfo = baseCreateInfo;
+ createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;
+ createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+ createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ ASSERT(IsSampleCountSupported(mDevice, createInfo));
+
+ VkImage image;
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &image),
+ "CreateImage"));
+ return image;
+ }
+
}}} // namespace dawn_native::vulkan::external_memory
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
index 1788f70cad0..8c70c677c51 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
@@ -12,9 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include "common/Assert.h"
#include "dawn_native/vulkan/AdapterVk.h"
#include "dawn_native/vulkan/BackendVk.h"
#include "dawn_native/vulkan/DeviceVk.h"
+#include "dawn_native/vulkan/TextureVk.h"
#include "dawn_native/vulkan/VulkanError.h"
#include "dawn_native/vulkan/external_memory/MemoryService.h"
@@ -32,11 +34,11 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
Service::~Service() = default;
- bool Service::Supported(VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags) {
+ bool Service::SupportsImportMemory(VkFormat format,
+ VkImageType type,
+ VkImageTiling tiling,
+ VkImageUsageFlags usage,
+ VkImageCreateFlags flags) {
// Early out before we try using extension functions
if (!mSupported) {
return false;
@@ -79,13 +81,39 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
!(memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR);
}
+ bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+ VkFormat format,
+ VkImageUsageFlags usage) {
+ return mSupported;
+ }
+
+ ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+ const ExternalImageDescriptor* descriptor,
+ VkImage image) {
+ if (descriptor->type != ExternalImageDescriptorType::OpaqueFD) {
+ return DAWN_VALIDATION_ERROR("ExternalImageDescriptor is not an OpaqueFD descriptor");
+ }
+ const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
+ static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
+
+ MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
+ opaqueFDDescriptor->memoryTypeIndex};
+ return params;
+ }
+
ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
- VkDeviceSize allocationSize,
- uint32_t memoryTypeIndex) {
+ const MemoryImportParams& importParams,
+ VkImage image) {
if (handle == ZX_HANDLE_INVALID) {
return DAWN_VALIDATION_ERROR("Trying to import memory with invalid handle");
}
+ VkMemoryRequirements requirements;
+ mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
+ if (requirements.size > importParams.allocationSize) {
+ return DAWN_VALIDATION_ERROR("Requested allocation size is too small for image");
+ }
+
VkImportMemoryZirconHandleInfoFUCHSIA importMemoryHandleInfo;
importMemoryHandleInfo.sType =
VK_STRUCTURE_TYPE_TEMP_MEMORY_ZIRCON_HANDLE_PROPERTIES_FUCHSIA;
@@ -97,8 +125,8 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
VkMemoryAllocateInfo allocateInfo;
allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocateInfo.pNext = &importMemoryHandleInfo;
- allocateInfo.allocationSize = allocationSize;
- allocateInfo.memoryTypeIndex = memoryTypeIndex;
+ allocateInfo.allocationSize = importParams.allocationSize;
+ allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
@@ -107,4 +135,20 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
return allocatedMemory;
}
+ ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+ const VkImageCreateInfo& baseCreateInfo) {
+ VkImageCreateInfo createInfo = baseCreateInfo;
+ createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;
+ createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+ createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ ASSERT(IsSampleCountSupported(mDevice, createInfo));
+
+ VkImage image;
+ DAWN_TRY(CheckVkSuccess(
+ mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &image),
+ "CreateImage"));
+ return image;
+ }
+
}}} // namespace dawn_native::vulkan::external_memory
diff --git a/chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.cpp b/chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.cpp
index 2727576939d..a110340f58a 100644
--- a/chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.cpp
+++ b/chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.cpp
@@ -18,13 +18,13 @@
namespace dawn_platform { namespace tracing {
- const unsigned char* GetTraceCategoryEnabledFlag(Platform* platform, const char* name) {
+ const unsigned char* GetTraceCategoryEnabledFlag(Platform* platform, TraceCategory category) {
static unsigned char disabled = 0;
if (platform == nullptr) {
return &disabled;
}
- const unsigned char* categoryEnabledFlag = platform->GetTraceCategoryEnabledFlag(name);
+ const unsigned char* categoryEnabledFlag = platform->GetTraceCategoryEnabledFlag(category);
if (categoryEnabledFlag != nullptr) {
return categoryEnabledFlag;
}
diff --git a/chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.h b/chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.h
index 911986b9c2f..114a743d79e 100644
--- a/chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.h
+++ b/chromium/third_party/dawn/src/dawn_platform/tracing/EventTracer.h
@@ -20,12 +20,16 @@
namespace dawn_platform {
class Platform;
+ enum class TraceCategory;
namespace tracing {
using TraceEventHandle = uint64_t;
- const unsigned char* GetTraceCategoryEnabledFlag(Platform* platform, const char* name);
+ const unsigned char* GetTraceCategoryEnabledFlag(Platform* platform,
+ TraceCategory category);
+
+ // TODO(enga): Simplify this API.
TraceEventHandle AddTraceEvent(Platform* platform,
char phase,
const unsigned char* categoryGroupEnabled,
diff --git a/chromium/third_party/dawn/src/dawn_platform/tracing/TraceEvent.h b/chromium/third_party/dawn/src/dawn_platform/tracing/TraceEvent.h
index f8d4d9e98de..159e91b1d05 100644
--- a/chromium/third_party/dawn/src/dawn_platform/tracing/TraceEvent.h
+++ b/chromium/third_party/dawn/src/dawn_platform/tracing/TraceEvent.h
@@ -156,12 +156,6 @@
#include "dawn_platform/tracing/EventTracer.h"
-#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
-
-// By default, const char* argument values are assumed to have long-lived scope
-// and will not be copied. Use this macro to force a const char* to be copied.
-#define TRACE_STR_COPY(str) WebCore::TraceEvent::TraceStringWithCopy(str)
-
// Records a pair of begin and end events called "name" for the current
// scope, with 0, 1 or 2 associated arguments. If the category is not
// enabled, then this does nothing.
@@ -679,35 +673,37 @@
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_ADD(platform, phase, category, name, flags, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platform, category); \
- if (*INTERNALTRACEEVENTUID(catstatic)) { \
- dawn_platform::TraceEvent::addTraceEvent( \
- platform, phase, INTERNALTRACEEVENTUID(catstatic), name, \
- dawn_platform::TraceEvent::noEventId, flags, ##__VA_ARGS__); \
- } \
+#define INTERNAL_TRACE_EVENT_ADD(platform, phase, category, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platform, \
+ ::dawn_platform::TraceCategory::category); \
+ if (*INTERNALTRACEEVENTUID(catstatic)) { \
+ dawn_platform::TraceEvent::addTraceEvent( \
+ platform, phase, INTERNALTRACEEVENTUID(catstatic), name, \
+ dawn_platform::TraceEvent::noEventId, flags, ##__VA_ARGS__); \
+ } \
} while (0)
// Implementation detail: internal macro to create static category and add begin
// event if the category is enabled. Also adds the end event when the scope
// ends.
-#define INTERNAL_TRACE_EVENT_ADD_SCOPED(platform, category, name, ...) \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platform, category); \
- dawn_platform::TraceEvent::TraceEndOnScopeClose INTERNALTRACEEVENTUID(profileScope); \
- if (*INTERNALTRACEEVENTUID(catstatic)) { \
- dawn_platform::TraceEvent::addTraceEvent( \
- platform, TRACE_EVENT_PHASE_BEGIN, INTERNALTRACEEVENTUID(catstatic), name, \
- dawn_platform::TraceEvent::noEventId, TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
- INTERNALTRACEEVENTUID(profileScope) \
- .initialize(platform, INTERNALTRACEEVENTUID(catstatic), name); \
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(platform, category, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platform, ::dawn_platform::TraceCategory::category); \
+ dawn_platform::TraceEvent::TraceEndOnScopeClose INTERNALTRACEEVENTUID(profileScope); \
+ if (*INTERNALTRACEEVENTUID(catstatic)) { \
+ dawn_platform::TraceEvent::addTraceEvent( \
+ platform, TRACE_EVENT_PHASE_BEGIN, INTERNALTRACEEVENTUID(catstatic), name, \
+ dawn_platform::TraceEvent::noEventId, TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
+ INTERNALTRACEEVENTUID(profileScope) \
+ .initialize(platform, INTERNALTRACEEVENTUID(catstatic), name); \
}
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(platform, phase, category, name, id, flags, ...) \
do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platform, category); \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(platform, \
+ ::dawn_platform::TraceCategory::category); \
if (*INTERNALTRACEEVENTUID(catstatic)) { \
unsigned char traceEventFlags = flags | TRACE_EVENT_FLAG_HAS_ID; \
dawn_platform::TraceEvent::TraceID traceEventTraceID(id, &traceEventFlags); \
diff --git a/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp b/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp
index 0cbbcd6c1ea..ac0a25ae27d 100644
--- a/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/WireClient.cpp
@@ -25,7 +25,7 @@ namespace dawn_wire {
mImpl.reset();
}
- DawnDevice WireClient::GetDevice() const {
+ WGPUDevice WireClient::GetDevice() const {
return mImpl->GetDevice();
}
@@ -37,7 +37,7 @@ namespace dawn_wire {
return mImpl->HandleCommands(commands, size);
}
- ReservedTexture WireClient::ReserveTexture(DawnDevice device) {
+ ReservedTexture WireClient::ReserveTexture(WGPUDevice device) {
return mImpl->ReserveTexture(device);
}
@@ -45,12 +45,12 @@ namespace dawn_wire {
MemoryTransferService::~MemoryTransferService() = default;
MemoryTransferService::ReadHandle*
- MemoryTransferService::CreateReadHandle(DawnBuffer buffer, uint64_t offset, size_t size) {
+ MemoryTransferService::CreateReadHandle(WGPUBuffer buffer, uint64_t offset, size_t size) {
return CreateReadHandle(size);
}
MemoryTransferService::WriteHandle*
- MemoryTransferService::CreateWriteHandle(DawnBuffer buffer, uint64_t offset, size_t size) {
+ MemoryTransferService::CreateWriteHandle(WGPUBuffer buffer, uint64_t offset, size_t size) {
return CreateWriteHandle(size);
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp b/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp
index 18966471268..11ed4244e02 100644
--- a/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/WireServer.cpp
@@ -32,7 +32,7 @@ namespace dawn_wire {
return mImpl->HandleCommands(commands, size);
}
- bool WireServer::InjectTexture(DawnTexture texture, uint32_t id, uint32_t generation) {
+ bool WireServer::InjectTexture(WGPUTexture texture, uint32_t id, uint32_t generation) {
return mImpl->InjectTexture(texture, id, generation);
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp
index 12ecf896468..14f25e4b19e 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp
@@ -46,8 +46,8 @@ namespace dawn_wire { namespace client {
}
} // namespace
- void ClientBufferMapReadAsync(DawnBuffer cBuffer,
- DawnBufferMapReadCallback callback,
+ void ClientBufferMapReadAsync(WGPUBuffer cBuffer,
+ WGPUBufferMapReadCallback callback,
void* userdata) {
Buffer* buffer = reinterpret_cast<Buffer*>(cBuffer);
@@ -59,7 +59,7 @@ namespace dawn_wire { namespace client {
MemoryTransferService::ReadHandle* readHandle =
buffer->device->GetClient()->GetMemoryTransferService()->CreateReadHandle(buffer->size);
if (readHandle == nullptr) {
- callback(DAWN_BUFFER_MAP_ASYNC_STATUS_DEVICE_LOST, nullptr, 0, userdata);
+ callback(WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0, userdata);
return;
}
@@ -76,8 +76,8 @@ namespace dawn_wire { namespace client {
SerializeBufferMapAsync(buffer, serial, readHandle);
}
- void ClientBufferMapWriteAsync(DawnBuffer cBuffer,
- DawnBufferMapWriteCallback callback,
+ void ClientBufferMapWriteAsync(WGPUBuffer cBuffer,
+ WGPUBufferMapWriteCallback callback,
void* userdata) {
Buffer* buffer = reinterpret_cast<Buffer*>(cBuffer);
@@ -90,7 +90,7 @@ namespace dawn_wire { namespace client {
buffer->device->GetClient()->GetMemoryTransferService()->CreateWriteHandle(
buffer->size);
if (writeHandle == nullptr) {
- callback(DAWN_BUFFER_MAP_ASYNC_STATUS_DEVICE_LOST, nullptr, 0, userdata);
+ callback(WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0, userdata);
return;
}
@@ -107,8 +107,8 @@ namespace dawn_wire { namespace client {
SerializeBufferMapAsync(buffer, serial, writeHandle);
}
- DawnBuffer ClientDeviceCreateBuffer(DawnDevice cDevice,
- const DawnBufferDescriptor* descriptor) {
+ WGPUBuffer ClientDeviceCreateBuffer(WGPUDevice cDevice,
+ const WGPUBufferDescriptor* descriptor) {
Device* device = reinterpret_cast<Device*>(cDevice);
Client* wireClient = device->GetClient();
@@ -127,12 +127,12 @@ namespace dawn_wire { namespace client {
char* allocatedBuffer = static_cast<char*>(wireClient->GetCmdSpace(requiredSize));
cmd.Serialize(allocatedBuffer, *wireClient);
- return reinterpret_cast<DawnBuffer>(buffer);
+ return reinterpret_cast<WGPUBuffer>(buffer);
}
- DawnCreateBufferMappedResult ClientDeviceCreateBufferMapped(
- DawnDevice cDevice,
- const DawnBufferDescriptor* descriptor) {
+ WGPUCreateBufferMappedResult ClientDeviceCreateBufferMapped(
+ WGPUDevice cDevice,
+ const WGPUBufferDescriptor* descriptor) {
Device* device = reinterpret_cast<Device*>(cDevice);
Client* wireClient = device->GetClient();
@@ -140,8 +140,8 @@ namespace dawn_wire { namespace client {
Buffer* buffer = bufferObjectAndSerial->object.get();
buffer->size = descriptor->size;
- DawnCreateBufferMappedResult result;
- result.buffer = reinterpret_cast<DawnBuffer>(buffer);
+ WGPUCreateBufferMappedResult result;
+ result.buffer = reinterpret_cast<WGPUBuffer>(buffer);
result.data = nullptr;
result.dataLength = 0;
@@ -190,9 +190,9 @@ namespace dawn_wire { namespace client {
return result;
}
- void ClientDeviceCreateBufferMappedAsync(DawnDevice cDevice,
- const DawnBufferDescriptor* descriptor,
- DawnBufferCreateMappedCallback callback,
+ void ClientDeviceCreateBufferMappedAsync(WGPUDevice cDevice,
+ const WGPUBufferDescriptor* descriptor,
+ WGPUBufferCreateMappedCallback callback,
void* userdata) {
Device* device = reinterpret_cast<Device*>(cDevice);
Client* wireClient = device->GetClient();
@@ -204,13 +204,13 @@ namespace dawn_wire { namespace client {
uint32_t serial = buffer->requestSerial++;
struct CreateBufferMappedInfo {
- DawnBuffer buffer;
- DawnBufferCreateMappedCallback callback;
+ WGPUBuffer buffer;
+ WGPUBufferCreateMappedCallback callback;
void* userdata;
};
CreateBufferMappedInfo* info = new CreateBufferMappedInfo;
- info->buffer = reinterpret_cast<DawnBuffer>(buffer);
+ info->buffer = reinterpret_cast<WGPUBuffer>(buffer);
info->callback = callback;
info->userdata = userdata;
@@ -219,21 +219,21 @@ namespace dawn_wire { namespace client {
MemoryTransferService::WriteHandle* writeHandle =
wireClient->GetMemoryTransferService()->CreateWriteHandle(descriptor->size);
if (writeHandle == nullptr) {
- DawnCreateBufferMappedResult result;
- result.buffer = reinterpret_cast<DawnBuffer>(buffer);
+ WGPUCreateBufferMappedResult result;
+ result.buffer = reinterpret_cast<WGPUBuffer>(buffer);
result.data = nullptr;
result.dataLength = 0;
- callback(DAWN_BUFFER_MAP_ASYNC_STATUS_DEVICE_LOST, result, userdata);
+ callback(WGPUBufferMapAsyncStatus_DeviceLost, result, userdata);
return;
}
Buffer::MapRequestData request;
- request.writeCallback = [](DawnBufferMapAsyncStatus status, void* data, uint64_t dataLength,
+ request.writeCallback = [](WGPUBufferMapAsyncStatus status, void* data, uint64_t dataLength,
void* userdata) {
auto info = std::unique_ptr<CreateBufferMappedInfo>(
static_cast<CreateBufferMappedInfo*>(userdata));
- DawnCreateBufferMappedResult result;
+ WGPUCreateBufferMappedResult result;
result.buffer = info->buffer;
result.data = data;
result.dataLength = dataLength;
@@ -264,36 +264,36 @@ namespace dawn_wire { namespace client {
writeHandle->SerializeCreate(allocatedBuffer + commandSize);
}
- void ClientDevicePushErrorScope(DawnDevice cDevice, DawnErrorFilter filter) {
+ void ClientDevicePushErrorScope(WGPUDevice cDevice, WGPUErrorFilter filter) {
Device* device = reinterpret_cast<Device*>(cDevice);
device->PushErrorScope(filter);
}
- bool ClientDevicePopErrorScope(DawnDevice cDevice, DawnErrorCallback callback, void* userdata) {
+ bool ClientDevicePopErrorScope(WGPUDevice cDevice, WGPUErrorCallback callback, void* userdata) {
Device* device = reinterpret_cast<Device*>(cDevice);
return device->RequestPopErrorScope(callback, userdata);
}
- uint64_t ClientFenceGetCompletedValue(DawnFence cSelf) {
+ uint64_t ClientFenceGetCompletedValue(WGPUFence cSelf) {
auto fence = reinterpret_cast<Fence*>(cSelf);
return fence->completedValue;
}
- void ClientFenceOnCompletion(DawnFence cFence,
+ void ClientFenceOnCompletion(WGPUFence cFence,
uint64_t value,
- DawnFenceOnCompletionCallback callback,
+ WGPUFenceOnCompletionCallback callback,
void* userdata) {
Fence* fence = reinterpret_cast<Fence*>(cFence);
if (value > fence->signaledValue) {
- ClientDeviceInjectError(reinterpret_cast<DawnDevice>(fence->device),
- DAWN_ERROR_TYPE_VALIDATION,
+ ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(fence->device),
+ WGPUErrorType_Validation,
"Value greater than fence signaled value");
- callback(DAWN_FENCE_COMPLETION_STATUS_ERROR, userdata);
+ callback(WGPUFenceCompletionStatus_Error, userdata);
return;
}
if (value <= fence->completedValue) {
- callback(DAWN_FENCE_COMPLETION_STATUS_SUCCESS, userdata);
+ callback(WGPUFenceCompletionStatus_Success, userdata);
return;
}
@@ -303,7 +303,7 @@ namespace dawn_wire { namespace client {
fence->requests.Enqueue(std::move(request), value);
}
- void ClientBufferSetSubData(DawnBuffer cBuffer,
+ void ClientBufferSetSubData(WGPUBuffer cBuffer,
uint64_t start,
uint64_t count,
const void* data) {
@@ -321,7 +321,7 @@ namespace dawn_wire { namespace client {
cmd.Serialize(allocatedBuffer);
}
- void ClientBufferUnmap(DawnBuffer cBuffer) {
+ void ClientBufferUnmap(WGPUBuffer cBuffer) {
Buffer* buffer = reinterpret_cast<Buffer*>(cBuffer);
// Invalidate the local pointer, and cancel all other in-flight requests that would
@@ -358,7 +358,7 @@ namespace dawn_wire { namespace client {
} else if (buffer->readHandle) {
buffer->readHandle = nullptr;
}
- buffer->ClearMapRequests(DAWN_BUFFER_MAP_ASYNC_STATUS_UNKNOWN);
+ buffer->ClearMapRequests(WGPUBufferMapAsyncStatus_Unknown);
BufferUnmapCmd cmd;
cmd.self = cBuffer;
@@ -368,7 +368,7 @@ namespace dawn_wire { namespace client {
cmd.Serialize(allocatedBuffer, *buffer->device->GetClient());
}
- DawnFence ClientQueueCreateFence(DawnQueue cSelf, DawnFenceDescriptor const* descriptor) {
+ WGPUFence ClientQueueCreateFence(WGPUQueue cSelf, WGPUFenceDescriptor const* descriptor) {
Queue* queue = reinterpret_cast<Queue*>(cSelf);
Device* device = queue->device;
@@ -382,7 +382,7 @@ namespace dawn_wire { namespace client {
char* allocatedBuffer = static_cast<char*>(device->GetClient()->GetCmdSpace(requiredSize));
cmd.Serialize(allocatedBuffer, *device->GetClient());
- DawnFence cFence = reinterpret_cast<DawnFence>(allocation->object.get());
+ WGPUFence cFence = reinterpret_cast<WGPUFence>(allocation->object.get());
Fence* fence = reinterpret_cast<Fence*>(cFence);
fence->queue = queue;
@@ -391,18 +391,18 @@ namespace dawn_wire { namespace client {
return cFence;
}
- void ClientQueueSignal(DawnQueue cQueue, DawnFence cFence, uint64_t signalValue) {
+ void ClientQueueSignal(WGPUQueue cQueue, WGPUFence cFence, uint64_t signalValue) {
Fence* fence = reinterpret_cast<Fence*>(cFence);
Queue* queue = reinterpret_cast<Queue*>(cQueue);
if (fence->queue != queue) {
- ClientDeviceInjectError(reinterpret_cast<DawnDevice>(fence->device),
- DAWN_ERROR_TYPE_VALIDATION,
+ ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(fence->device),
+ WGPUErrorType_Validation,
"Fence must be signaled on the queue on which it was created.");
return;
}
if (signalValue <= fence->signaledValue) {
- ClientDeviceInjectError(reinterpret_cast<DawnDevice>(fence->device),
- DAWN_ERROR_TYPE_VALIDATION,
+ ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(fence->device),
+ WGPUErrorType_Validation,
"Fence value less than or equal to signaled value");
return;
}
@@ -419,14 +419,14 @@ namespace dawn_wire { namespace client {
cmd.Serialize(allocatedBuffer, *fence->device->GetClient());
}
- void ClientDeviceReference(DawnDevice) {
+ void ClientDeviceReference(WGPUDevice) {
}
- void ClientDeviceRelease(DawnDevice) {
+ void ClientDeviceRelease(WGPUDevice) {
}
- void ClientDeviceSetUncapturedErrorCallback(DawnDevice cSelf,
- DawnErrorCallback callback,
+ void ClientDeviceSetUncapturedErrorCallback(WGPUDevice cSelf,
+ WGPUErrorCallback callback,
void* userdata) {
Device* device = reinterpret_cast<Device*>(cSelf);
device->SetUncapturedErrorCallback(callback, userdata);
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
index 368627775f7..3548f5133fb 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
@@ -19,10 +19,10 @@ namespace dawn_wire { namespace client {
Buffer::~Buffer() {
// Callbacks need to be fired in all cases, as they can handle freeing resources
// so we call them with "Unknown" status.
- ClearMapRequests(DAWN_BUFFER_MAP_ASYNC_STATUS_UNKNOWN);
+ ClearMapRequests(WGPUBufferMapAsyncStatus_Unknown);
}
- void Buffer::ClearMapRequests(DawnBufferMapAsyncStatus status) {
+ void Buffer::ClearMapRequests(WGPUBufferMapAsyncStatus status) {
for (auto& it : requests) {
if (it.second.writeHandle) {
it.second.writeCallback(status, nullptr, 0, it.second.userdata);
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h
index aa90d6ee0f4..09da8e34977 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h
@@ -15,7 +15,7 @@
#ifndef DAWNWIRE_CLIENT_BUFFER_H_
#define DAWNWIRE_CLIENT_BUFFER_H_
-#include <dawn/dawn.h>
+#include <dawn/webgpu.h>
#include "dawn_wire/WireClient.h"
#include "dawn_wire/client/ObjectBase.h"
@@ -28,15 +28,15 @@ namespace dawn_wire { namespace client {
using ObjectBase::ObjectBase;
~Buffer();
- void ClearMapRequests(DawnBufferMapAsyncStatus status);
+ void ClearMapRequests(WGPUBufferMapAsyncStatus status);
// We want to defer all the validation to the server, which means we could have multiple
// map request in flight at a single time and need to track them separately.
// On well-behaved applications, only one request should exist at a single time.
struct MapRequestData {
// TODO(enga): Use a tagged pointer to save space.
- DawnBufferMapReadCallback readCallback = nullptr;
- DawnBufferMapWriteCallback writeCallback = nullptr;
+ WGPUBufferMapReadCallback readCallback = nullptr;
+ WGPUBufferMapWriteCallback writeCallback = nullptr;
void* userdata = nullptr;
// TODO(enga): Use a tagged pointer to save space.
std::unique_ptr<MemoryTransferService::ReadHandle> readHandle = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
index 8079d499c18..572086f87ed 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
@@ -33,12 +33,12 @@ namespace dawn_wire { namespace client {
DeviceAllocator().Free(mDevice);
}
- ReservedTexture Client::ReserveTexture(DawnDevice cDevice) {
+ ReservedTexture Client::ReserveTexture(WGPUDevice cDevice) {
Device* device = reinterpret_cast<Device*>(cDevice);
ObjectAllocator<Texture>::ObjectAndSerial* allocation = TextureAllocator().New(device);
ReservedTexture result;
- result.texture = reinterpret_cast<DawnTexture>(allocation->object.get());
+ result.texture = reinterpret_cast<WGPUTexture>(allocation->object.get());
result.id = allocation->object->id;
result.generation = allocation->serial;
return result;
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Client.h b/chromium/third_party/dawn/src/dawn_wire/client/Client.h
index c1af4276339..f7d311f3c9a 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Client.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Client.h
@@ -15,7 +15,7 @@
#ifndef DAWNWIRE_CLIENT_CLIENT_H_
#define DAWNWIRE_CLIENT_CLIENT_H_
-#include <dawn/dawn.h>
+#include <dawn/webgpu.h>
#include <dawn_wire/Wire.h>
#include "dawn_wire/WireClient.h"
@@ -34,14 +34,14 @@ namespace dawn_wire { namespace client {
~Client();
const volatile char* HandleCommands(const volatile char* commands, size_t size);
- ReservedTexture ReserveTexture(DawnDevice device);
+ ReservedTexture ReserveTexture(WGPUDevice device);
void* GetCmdSpace(size_t size) {
return mSerializer->GetCmdSpace(size);
}
- DawnDevice GetDevice() const {
- return reinterpret_cast<DawnDeviceImpl*>(mDevice);
+ WGPUDevice GetDevice() const {
+ return reinterpret_cast<WGPUDeviceImpl*>(mDevice);
}
MemoryTransferService* GetMemoryTransferService() const {
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
index 09758ea9f55..1be0f1df018 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
@@ -20,13 +20,13 @@
namespace dawn_wire { namespace client {
- bool Client::DoDeviceUncapturedErrorCallback(DawnErrorType errorType, const char* message) {
+ bool Client::DoDeviceUncapturedErrorCallback(WGPUErrorType errorType, const char* message) {
switch (errorType) {
- case DAWN_ERROR_TYPE_NO_ERROR:
- case DAWN_ERROR_TYPE_VALIDATION:
- case DAWN_ERROR_TYPE_OUT_OF_MEMORY:
- case DAWN_ERROR_TYPE_UNKNOWN:
- case DAWN_ERROR_TYPE_DEVICE_LOST:
+ case WGPUErrorType_NoError:
+ case WGPUErrorType_Validation:
+ case WGPUErrorType_OutOfMemory:
+ case WGPUErrorType_Unknown:
+ case WGPUErrorType_DeviceLost:
break;
default:
return false;
@@ -36,7 +36,7 @@ namespace dawn_wire { namespace client {
}
bool Client::DoDevicePopErrorScopeCallback(uint64_t requestSerial,
- DawnErrorType errorType,
+ WGPUErrorType errorType,
const char* message) {
return mDevice->PopErrorScope(requestSerial, errorType, message);
}
@@ -71,7 +71,7 @@ namespace dawn_wire { namespace client {
return false;
}
- if (status == DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS) {
+ if (status == WGPUBufferMapAsyncStatus_Success) {
if (buffer->readHandle || buffer->writeHandle) {
// Buffer is already mapped.
return false;
@@ -105,11 +105,10 @@ namespace dawn_wire { namespace client {
if (!GetMappedData()) {
// Dawn promises that all callbacks are called in finite time. Even if a fatal error
// occurs, the callback is called.
- request.readCallback(DAWN_BUFFER_MAP_ASYNC_STATUS_DEVICE_LOST, nullptr, 0,
- request.userdata);
+ request.readCallback(WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0, request.userdata);
return false;
} else {
- request.readCallback(static_cast<DawnBufferMapAsyncStatus>(status), mappedData,
+ request.readCallback(static_cast<WGPUBufferMapAsyncStatus>(status), mappedData,
static_cast<uint64_t>(mappedDataLength), request.userdata);
return true;
}
@@ -143,7 +142,7 @@ namespace dawn_wire { namespace client {
return false;
}
- if (status == DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS) {
+ if (status == WGPUBufferMapAsyncStatus_Success) {
if (buffer->readHandle || buffer->writeHandle) {
// Buffer is already mapped.
return false;
@@ -169,11 +168,11 @@ namespace dawn_wire { namespace client {
if (!GetMappedData()) {
// Dawn promises that all callbacks are called in finite time. Even if a fatal error
// occurs, the callback is called.
- request.writeCallback(DAWN_BUFFER_MAP_ASYNC_STATUS_DEVICE_LOST, nullptr, 0,
+ request.writeCallback(WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0,
request.userdata);
return false;
} else {
- request.writeCallback(static_cast<DawnBufferMapAsyncStatus>(status), mappedData,
+ request.writeCallback(static_cast<WGPUBufferMapAsyncStatus>(status), mappedData,
static_cast<uint64_t>(mappedDataLength), request.userdata);
return true;
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
index 7682e1accf0..8577a4f644e 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
@@ -28,7 +28,7 @@ namespace dawn_wire { namespace client {
Device::~Device() {
auto errorScopes = std::move(mErrorScopes);
for (const auto& it : errorScopes) {
- it.second.callback(DAWN_ERROR_TYPE_UNKNOWN, "Device destroyed", it.second.userdata);
+ it.second.callback(WGPUErrorType_Unknown, "Device destroyed", it.second.userdata);
}
}
@@ -36,22 +36,22 @@ namespace dawn_wire { namespace client {
return mClient;
}
- void Device::HandleError(DawnErrorType errorType, const char* message) {
+ void Device::HandleError(WGPUErrorType errorType, const char* message) {
if (mErrorCallback) {
mErrorCallback(errorType, message, mErrorUserdata);
}
}
- void Device::SetUncapturedErrorCallback(DawnErrorCallback errorCallback, void* errorUserdata) {
+ void Device::SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata) {
mErrorCallback = errorCallback;
mErrorUserdata = errorUserdata;
}
- void Device::PushErrorScope(DawnErrorFilter filter) {
+ void Device::PushErrorScope(WGPUErrorFilter filter) {
mErrorScopeStackSize++;
DevicePushErrorScopeCmd cmd;
- cmd.self = reinterpret_cast<DawnDevice>(this);
+ cmd.self = reinterpret_cast<WGPUDevice>(this);
cmd.filter = filter;
Client* wireClient = GetClient();
@@ -60,7 +60,7 @@ namespace dawn_wire { namespace client {
cmd.Serialize(allocatedBuffer, *wireClient);
}
- bool Device::RequestPopErrorScope(DawnErrorCallback callback, void* userdata) {
+ bool Device::RequestPopErrorScope(WGPUErrorCallback callback, void* userdata) {
if (mErrorScopeStackSize == 0) {
return false;
}
@@ -72,7 +72,7 @@ namespace dawn_wire { namespace client {
mErrorScopes[serial] = {callback, userdata};
DevicePopErrorScopeCmd cmd;
- cmd.device = reinterpret_cast<DawnDevice>(this);
+ cmd.device = reinterpret_cast<WGPUDevice>(this);
cmd.requestSerial = serial;
Client* wireClient = GetClient();
@@ -83,13 +83,13 @@ namespace dawn_wire { namespace client {
return true;
}
- bool Device::PopErrorScope(uint64_t requestSerial, DawnErrorType type, const char* message) {
+ bool Device::PopErrorScope(uint64_t requestSerial, WGPUErrorType type, const char* message) {
switch (type) {
- case DAWN_ERROR_TYPE_NO_ERROR:
- case DAWN_ERROR_TYPE_VALIDATION:
- case DAWN_ERROR_TYPE_OUT_OF_MEMORY:
- case DAWN_ERROR_TYPE_UNKNOWN:
- case DAWN_ERROR_TYPE_DEVICE_LOST:
+ case WGPUErrorType_NoError:
+ case WGPUErrorType_Validation:
+ case WGPUErrorType_OutOfMemory:
+ case WGPUErrorType_Unknown:
+ case WGPUErrorType_DeviceLost:
break;
default:
return false;
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Device.h b/chromium/third_party/dawn/src/dawn_wire/client/Device.h
index 600a1c367ff..9c1bb2f9fa1 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Device.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Device.h
@@ -15,7 +15,7 @@
#ifndef DAWNWIRE_CLIENT_DEVICE_H_
#define DAWNWIRE_CLIENT_DEVICE_H_
-#include <dawn/dawn.h>
+#include <dawn/webgpu.h>
#include "dawn_wire/client/ObjectBase.h"
@@ -31,16 +31,16 @@ namespace dawn_wire { namespace client {
~Device();
Client* GetClient();
- void HandleError(DawnErrorType errorType, const char* message);
- void SetUncapturedErrorCallback(DawnErrorCallback errorCallback, void* errorUserdata);
+ void HandleError(WGPUErrorType errorType, const char* message);
+ void SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata);
- void PushErrorScope(DawnErrorFilter filter);
- bool RequestPopErrorScope(DawnErrorCallback callback, void* userdata);
- bool PopErrorScope(uint64_t requestSerial, DawnErrorType type, const char* message);
+ void PushErrorScope(WGPUErrorFilter filter);
+ bool RequestPopErrorScope(WGPUErrorCallback callback, void* userdata);
+ bool PopErrorScope(uint64_t requestSerial, WGPUErrorType type, const char* message);
private:
struct ErrorScopeData {
- DawnErrorCallback callback = nullptr;
+ WGPUErrorCallback callback = nullptr;
void* userdata = nullptr;
};
std::map<uint64_t, ErrorScopeData> mErrorScopes;
@@ -48,7 +48,7 @@ namespace dawn_wire { namespace client {
uint64_t mErrorScopeStackSize = 0;
Client* mClient = nullptr;
- DawnErrorCallback mErrorCallback = nullptr;
+ WGPUErrorCallback mErrorCallback = nullptr;
void* mErrorUserdata;
};
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Fence.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Fence.cpp
index 497b714f50c..607483e520f 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Fence.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Fence.cpp
@@ -20,14 +20,14 @@ namespace dawn_wire { namespace client {
// Callbacks need to be fired in all cases, as they can handle freeing resources
// so we call them with "Unknown" status.
for (auto& request : requests.IterateAll()) {
- request.completionCallback(DAWN_FENCE_COMPLETION_STATUS_UNKNOWN, request.userdata);
+ request.completionCallback(WGPUFenceCompletionStatus_Unknown, request.userdata);
}
requests.Clear();
}
void Fence::CheckPassedFences() {
for (auto& request : requests.IterateUpTo(completedValue)) {
- request.completionCallback(DAWN_FENCE_COMPLETION_STATUS_SUCCESS, request.userdata);
+ request.completionCallback(WGPUFenceCompletionStatus_Success, request.userdata);
}
requests.ClearUpTo(completedValue);
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Fence.h b/chromium/third_party/dawn/src/dawn_wire/client/Fence.h
index d9b5e57bee9..4acde6d6837 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Fence.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Fence.h
@@ -15,7 +15,7 @@
#ifndef DAWNWIRE_CLIENT_FENCE_H_
#define DAWNWIRE_CLIENT_FENCE_H_
-#include <dawn/dawn.h>
+#include <dawn/webgpu.h>
#include "common/SerialMap.h"
#include "dawn_wire/client/ObjectBase.h"
@@ -30,7 +30,7 @@ namespace dawn_wire { namespace client {
void CheckPassedFences();
struct OnCompletionData {
- DawnFenceOnCompletionCallback completionCallback = nullptr;
+ WGPUFenceOnCompletionCallback completionCallback = nullptr;
void* userdata = nullptr;
};
Queue* queue = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ObjectBase.h b/chromium/third_party/dawn/src/dawn_wire/client/ObjectBase.h
index b97b0409a28..edf18f6c87f 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ObjectBase.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ObjectBase.h
@@ -15,7 +15,7 @@
#ifndef DAWNWIRE_CLIENT_OBJECTBASE_H_
#define DAWNWIRE_CLIENT_OBJECTBASE_H_
-#include <dawn/dawn.h>
+#include <dawn/webgpu.h>
namespace dawn_wire { namespace client {
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ObjectStorage.h b/chromium/third_party/dawn/src/dawn_wire/server/ObjectStorage.h
index 8afb7ea2d79..55d6a0962aa 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ObjectStorage.h
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ObjectStorage.h
@@ -41,7 +41,7 @@ namespace dawn_wire { namespace server {
enum class BufferMapWriteState { Unmapped, Mapped, MapError };
template <>
- struct ObjectData<DawnBuffer> : public ObjectDataBase<DawnBuffer> {
+ struct ObjectData<WGPUBuffer> : public ObjectDataBase<WGPUBuffer> {
// TODO(enga): Use a tagged pointer to save space.
std::unique_ptr<MemoryTransferService::ReadHandle> readHandle;
std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle;
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp b/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
index ab56c4e45b3..d03980acf14 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/Server.cpp
@@ -17,7 +17,7 @@
namespace dawn_wire { namespace server {
- Server::Server(DawnDevice device,
+ Server::Server(WGPUDevice device,
const DawnProcTable& procs,
CommandSerializer* serializer,
MemoryTransferService* memoryTransferService)
@@ -42,8 +42,8 @@ namespace dawn_wire { namespace server {
return mSerializer->GetCmdSpace(size);
}
- bool Server::InjectTexture(DawnTexture texture, uint32_t id, uint32_t generation) {
- ObjectData<DawnTexture>* data = TextureObjects().Allocate(id);
+ bool Server::InjectTexture(WGPUTexture texture, uint32_t id, uint32_t generation) {
+ ObjectData<WGPUTexture>* data = TextureObjects().Allocate(id);
if (data == nullptr) {
return false;
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.h b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
index 0f901ad2fc0..28f4c81fba1 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.h
+++ b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
@@ -47,7 +47,7 @@ namespace dawn_wire { namespace server {
class Server : public ServerBase {
public:
- Server(DawnDevice device,
+ Server(WGPUDevice device,
const DawnProcTable& procs,
CommandSerializer* serializer,
MemoryTransferService* memoryTransferService);
@@ -55,38 +55,38 @@ namespace dawn_wire { namespace server {
const volatile char* HandleCommands(const volatile char* commands, size_t size);
- bool InjectTexture(DawnTexture texture, uint32_t id, uint32_t generation);
+ bool InjectTexture(WGPUTexture texture, uint32_t id, uint32_t generation);
private:
void* GetCmdSpace(size_t size);
// Forwarding callbacks
- static void ForwardUncapturedError(DawnErrorType type, const char* message, void* userdata);
- static void ForwardPopErrorScope(DawnErrorType type, const char* message, void* userdata);
- static void ForwardBufferMapReadAsync(DawnBufferMapAsyncStatus status,
+ static void ForwardUncapturedError(WGPUErrorType type, const char* message, void* userdata);
+ static void ForwardPopErrorScope(WGPUErrorType type, const char* message, void* userdata);
+ static void ForwardBufferMapReadAsync(WGPUBufferMapAsyncStatus status,
const void* ptr,
uint64_t dataLength,
void* userdata);
- static void ForwardBufferMapWriteAsync(DawnBufferMapAsyncStatus status,
+ static void ForwardBufferMapWriteAsync(WGPUBufferMapAsyncStatus status,
void* ptr,
uint64_t dataLength,
void* userdata);
- static void ForwardFenceCompletedValue(DawnFenceCompletionStatus status, void* userdata);
+ static void ForwardFenceCompletedValue(WGPUFenceCompletionStatus status, void* userdata);
// Error callbacks
- void OnUncapturedError(DawnErrorType type, const char* message);
- void OnDevicePopErrorScope(DawnErrorType type,
+ void OnUncapturedError(WGPUErrorType type, const char* message);
+ void OnDevicePopErrorScope(WGPUErrorType type,
const char* message,
ErrorScopeUserdata* userdata);
- void OnBufferMapReadAsyncCallback(DawnBufferMapAsyncStatus status,
+ void OnBufferMapReadAsyncCallback(WGPUBufferMapAsyncStatus status,
const void* ptr,
uint64_t dataLength,
MapUserdata* userdata);
- void OnBufferMapWriteAsyncCallback(DawnBufferMapAsyncStatus status,
+ void OnBufferMapWriteAsyncCallback(WGPUBufferMapAsyncStatus status,
void* ptr,
uint64_t dataLength,
MapUserdata* userdata);
- void OnFenceCompletedValueUpdated(DawnFenceCompletionStatus status,
+ void OnFenceCompletedValueUpdated(WGPUFenceCompletionStatus status,
FenceCompletionUserdata* userdata);
#include "dawn_wire/server/ServerPrototypes_autogen.inc"
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp
index c969beb3154..fb871a59422 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp
@@ -92,8 +92,8 @@ namespace dawn_wire { namespace server {
return true;
}
- bool Server::DoDeviceCreateBufferMapped(DawnDevice device,
- const DawnBufferDescriptor* descriptor,
+ bool Server::DoDeviceCreateBufferMapped(WGPUDevice device,
+ const WGPUBufferDescriptor* descriptor,
ObjectHandle bufferResult,
uint64_t handleCreateInfoLength,
const uint8_t* handleCreateInfo) {
@@ -109,7 +109,7 @@ namespace dawn_wire { namespace server {
}
resultData->serial = bufferResult.serial;
- DawnCreateBufferMappedResult result = mProcs.deviceCreateBufferMapped(device, descriptor);
+ WGPUCreateBufferMappedResult result = mProcs.deviceCreateBufferMapped(device, descriptor);
ASSERT(result.buffer != nullptr);
if (result.data == nullptr && result.dataLength != 0) {
// Non-zero dataLength but null data is used to indicate an allocation error.
@@ -140,8 +140,8 @@ namespace dawn_wire { namespace server {
return true;
}
- bool Server::DoDeviceCreateBufferMappedAsync(DawnDevice device,
- const DawnBufferDescriptor* descriptor,
+ bool Server::DoDeviceCreateBufferMappedAsync(WGPUDevice device,
+ const WGPUBufferDescriptor* descriptor,
uint32_t requestSerial,
ObjectHandle bufferResult,
uint64_t handleCreateInfoLength,
@@ -158,8 +158,8 @@ namespace dawn_wire { namespace server {
cmd.buffer = ObjectHandle{bufferResult.id, bufferResult.serial};
cmd.requestSerial = requestSerial;
cmd.status = bufferData->mapWriteState == BufferMapWriteState::Mapped
- ? DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS
- : DAWN_BUFFER_MAP_ASYNC_STATUS_ERROR;
+ ? WGPUBufferMapAsyncStatus_Success
+ : WGPUBufferMapAsyncStatus_Error;
size_t requiredSize = cmd.GetRequiredSize();
char* allocatedBuffer = static_cast<char*>(GetCmdSpace(requiredSize));
@@ -223,7 +223,7 @@ namespace dawn_wire { namespace server {
static_cast<size_t>(writeFlushInfoLength));
}
- void Server::ForwardBufferMapReadAsync(DawnBufferMapAsyncStatus status,
+ void Server::ForwardBufferMapReadAsync(WGPUBufferMapAsyncStatus status,
const void* ptr,
uint64_t dataLength,
void* userdata) {
@@ -231,7 +231,7 @@ namespace dawn_wire { namespace server {
data->server->OnBufferMapReadAsyncCallback(status, ptr, dataLength, data);
}
- void Server::ForwardBufferMapWriteAsync(DawnBufferMapAsyncStatus status,
+ void Server::ForwardBufferMapWriteAsync(WGPUBufferMapAsyncStatus status,
void* ptr,
uint64_t dataLength,
void* userdata) {
@@ -239,7 +239,7 @@ namespace dawn_wire { namespace server {
data->server->OnBufferMapWriteAsyncCallback(status, ptr, dataLength, data);
}
- void Server::OnBufferMapReadAsyncCallback(DawnBufferMapAsyncStatus status,
+ void Server::OnBufferMapReadAsyncCallback(WGPUBufferMapAsyncStatus status,
const void* ptr,
uint64_t dataLength,
MapUserdata* userdata) {
@@ -252,7 +252,7 @@ namespace dawn_wire { namespace server {
}
size_t initialDataInfoLength = 0;
- if (status == DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS) {
+ if (status == WGPUBufferMapAsyncStatus_Success) {
// Get the serialization size of the message to initialize ReadHandle data.
initialDataInfoLength = data->readHandle->SerializeInitialDataSize(ptr, dataLength);
} else {
@@ -271,7 +271,7 @@ namespace dawn_wire { namespace server {
char* allocatedBuffer = static_cast<char*>(GetCmdSpace(requiredSize));
cmd.Serialize(allocatedBuffer);
- if (status == DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS) {
+ if (status == WGPUBufferMapAsyncStatus_Success) {
// Serialize the initialization message into the space after the command.
data->readHandle->SerializeInitialData(ptr, dataLength, allocatedBuffer + commandSize);
@@ -281,7 +281,7 @@ namespace dawn_wire { namespace server {
}
}
- void Server::OnBufferMapWriteAsyncCallback(DawnBufferMapAsyncStatus status,
+ void Server::OnBufferMapWriteAsyncCallback(WGPUBufferMapAsyncStatus status,
void* ptr,
uint64_t dataLength,
MapUserdata* userdata) {
@@ -302,7 +302,7 @@ namespace dawn_wire { namespace server {
char* allocatedBuffer = static_cast<char*>(GetCmdSpace(requiredSize));
cmd.Serialize(allocatedBuffer);
- if (status == DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS) {
+ if (status == WGPUBufferMapAsyncStatus_Success) {
// The in-flight map request returned successfully.
// Move the WriteHandle so it is owned by the buffer.
bufferData->writeHandle = std::move(data->writeHandle);
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
index 8713b57c27e..6f27867d2a7 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerDevice.cpp
@@ -16,12 +16,12 @@
namespace dawn_wire { namespace server {
- void Server::ForwardUncapturedError(DawnErrorType type, const char* message, void* userdata) {
+ void Server::ForwardUncapturedError(WGPUErrorType type, const char* message, void* userdata) {
auto server = static_cast<Server*>(userdata);
server->OnUncapturedError(type, message);
}
- void Server::OnUncapturedError(DawnErrorType type, const char* message) {
+ void Server::OnUncapturedError(WGPUErrorType type, const char* message) {
ReturnDeviceUncapturedErrorCallbackCmd cmd;
cmd.type = type;
cmd.message = message;
@@ -31,7 +31,7 @@ namespace dawn_wire { namespace server {
cmd.Serialize(allocatedBuffer);
}
- bool Server::DoDevicePopErrorScope(DawnDevice cDevice, uint64_t requestSerial) {
+ bool Server::DoDevicePopErrorScope(WGPUDevice cDevice, uint64_t requestSerial) {
ErrorScopeUserdata* userdata = new ErrorScopeUserdata;
userdata->server = this;
userdata->requestSerial = requestSerial;
@@ -44,12 +44,12 @@ namespace dawn_wire { namespace server {
}
// static
- void Server::ForwardPopErrorScope(DawnErrorType type, const char* message, void* userdata) {
+ void Server::ForwardPopErrorScope(WGPUErrorType type, const char* message, void* userdata) {
auto* data = reinterpret_cast<ErrorScopeUserdata*>(userdata);
data->server->OnDevicePopErrorScope(type, message, data);
}
- void Server::OnDevicePopErrorScope(DawnErrorType type,
+ void Server::OnDevicePopErrorScope(WGPUErrorType type,
const char* message,
ErrorScopeUserdata* userdata) {
std::unique_ptr<ErrorScopeUserdata> data{userdata};
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerFence.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerFence.cpp
index 79e14c401d3..de056a1d5e9 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerFence.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerFence.cpp
@@ -18,16 +18,16 @@
namespace dawn_wire { namespace server {
- void Server::ForwardFenceCompletedValue(DawnFenceCompletionStatus status, void* userdata) {
+ void Server::ForwardFenceCompletedValue(WGPUFenceCompletionStatus status, void* userdata) {
auto data = static_cast<FenceCompletionUserdata*>(userdata);
data->server->OnFenceCompletedValueUpdated(status, data);
}
- void Server::OnFenceCompletedValueUpdated(DawnFenceCompletionStatus status,
+ void Server::OnFenceCompletedValueUpdated(WGPUFenceCompletionStatus status,
FenceCompletionUserdata* userdata) {
std::unique_ptr<FenceCompletionUserdata> data(userdata);
- if (status != DAWN_FENCE_COMPLETION_STATUS_SUCCESS) {
+ if (status != WGPUFenceCompletionStatus_Success) {
return;
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp
index 1851ddad581..9ed58b49845 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp
@@ -17,7 +17,7 @@
namespace dawn_wire { namespace server {
- bool Server::DoQueueSignal(DawnQueue cSelf, DawnFence cFence, uint64_t signalValue) {
+ bool Server::DoQueueSignal(WGPUQueue cSelf, WGPUFence cFence, uint64_t signalValue) {
if (cFence == nullptr) {
return false;
}
diff --git a/chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h b/chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h
index 7e1d769a9b0..93d2be4574d 100644
--- a/chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h
+++ b/chromium/third_party/dawn/src/include/dawn/EnumClassBitmasks.h
@@ -17,26 +17,7 @@
#include <type_traits>
-namespace dawn {
-
-// std::underlying_type doesn't work in old GLIBC still used in Chrome
-#define CR_GLIBCXX_4_7_0 20120322
-#define CR_GLIBCXX_4_5_4 20120702
-#define CR_GLIBCXX_4_6_4 20121127
-#if defined(__GLIBCXX__) && (__GLIBCXX__ < CR_GLIBCXX_4_7_0 || __GLIBCXX__ == CR_GLIBCXX_4_5_4 || \
- __GLIBCXX__ == CR_GLIBCXX_4_6_4)
-# define CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
-#endif
-
-#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX)
- template <typename T>
- struct UnderlyingType {
- using type = __underlying_type(T);
- };
-#else
- template <typename T>
- using UnderlyingType = std::underlying_type<T>;
-#endif
+namespace wgpu {
template <typename T>
struct IsDawnBitmask {
@@ -59,7 +40,7 @@ namespace dawn {
template <typename T>
struct BoolConvertible {
- using Integral = typename UnderlyingType<T>::type;
+ using Integral = typename std::underlying_type<T>::type;
constexpr BoolConvertible(Integral value) : value(value) {
}
@@ -82,19 +63,13 @@ namespace dawn {
}
};
- template <typename T>
- constexpr bool HasZeroOrOneBits(T value) {
- using Integral = typename UnderlyingType<T>::type;
- return (static_cast<Integral>(value) & (static_cast<Integral>(value) - 1)) == 0;
- }
-
template <typename T1,
typename T2,
typename = typename std::enable_if<LowerBitmask<T1>::enable &&
LowerBitmask<T2>::enable>::type>
constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator|(T1 left, T2 right) {
using T = typename LowerBitmask<T1>::type;
- using Integral = typename UnderlyingType<T>::type;
+ using Integral = typename std::underlying_type<T>::type;
return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) |
static_cast<Integral>(LowerBitmask<T2>::Lower(right));
}
@@ -105,7 +80,7 @@ namespace dawn {
LowerBitmask<T2>::enable>::type>
constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator&(T1 left, T2 right) {
using T = typename LowerBitmask<T1>::type;
- using Integral = typename UnderlyingType<T>::type;
+ using Integral = typename std::underlying_type<T>::type;
return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) &
static_cast<Integral>(LowerBitmask<T2>::Lower(right));
}
@@ -116,7 +91,7 @@ namespace dawn {
LowerBitmask<T2>::enable>::type>
constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator^(T1 left, T2 right) {
using T = typename LowerBitmask<T1>::type;
- using Integral = typename UnderlyingType<T>::type;
+ using Integral = typename std::underlying_type<T>::type;
return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) ^
static_cast<Integral>(LowerBitmask<T2>::Lower(right));
}
@@ -124,7 +99,7 @@ namespace dawn {
template <typename T1>
constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator~(T1 t) {
using T = typename LowerBitmask<T1>::type;
- using Integral = typename UnderlyingType<T>::type;
+ using Integral = typename std::underlying_type<T>::type;
return ~static_cast<Integral>(LowerBitmask<T1>::Lower(t));
}
@@ -157,6 +132,13 @@ namespace dawn {
l = l ^ r;
return l;
}
-} // namespace dawn
+
+ template <typename T>
+ constexpr bool HasZeroOrOneBits(T value) {
+ using Integral = typename std::underlying_type<T>::type;
+ return (static_cast<Integral>(value) & (static_cast<Integral>(value) - 1)) == 0;
+ }
+
+} // namespace wgpu
#endif // DAWN_ENUM_CLASS_BITMASKS_H_
diff --git a/chromium/third_party/dawn/src/include/dawn/dawn_export.h b/chromium/third_party/dawn/src/include/dawn/dawn_export.h
deleted file mode 100644
index 354bcfff0fe..00000000000
--- a/chromium/third_party/dawn/src/include/dawn/dawn_export.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2018 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef DAWN_EXPORT_H_
-#define DAWN_EXPORT_H_
-
-#if defined(DAWN_SHARED_LIBRARY)
-# if defined(_WIN32)
-# if defined(DAWN_IMPLEMENTATION)
-# define DAWN_EXPORT __declspec(dllexport)
-# else
-# define DAWN_EXPORT __declspec(dllimport)
-# endif
-# else // defined(_WIN32)
-# if defined(DAWN_IMPLEMENTATION)
-# define DAWN_EXPORT __attribute__((visibility("default")))
-# else
-# define DAWN_EXPORT
-# endif
-# endif // defined(_WIN32)
-#else // defined(DAWN_SHARED_LIBRARY)
-# define DAWN_EXPORT
-#endif // defined(DAWN_SHARED_LIBRARY)
-
-#endif // DAWN_EXPORT_H_
diff --git a/chromium/third_party/dawn/src/include/dawn/dawn_proc.h b/chromium/third_party/dawn/src/include/dawn/dawn_proc.h
index ad0e393b248..adeec463352 100644
--- a/chromium/third_party/dawn/src/include/dawn/dawn_proc.h
+++ b/chromium/third_party/dawn/src/include/dawn/dawn_proc.h
@@ -15,8 +15,8 @@
#ifndef DAWN_DAWN_PROC_H_
#define DAWN_DAWN_PROC_H_
-#include "dawn/dawn.h"
#include "dawn/dawn_proc_table.h"
+#include "dawn/webgpu.h"
#ifdef __cplusplus
extern "C" {
@@ -27,7 +27,7 @@ extern "C" {
// default value of the proctable. Setting the proctable back to null is good practice when you
// are done using libdawn_proc since further usage will cause a segfault instead of calling an
// unexpected function.
-DAWN_EXPORT void dawnProcSetProcs(const DawnProcTable* procs);
+WGPU_EXPORT void dawnProcSetProcs(const DawnProcTable* procs);
#ifdef __cplusplus
} // extern "C"
diff --git a/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h b/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
index ff83f722bf9..e07e74185cd 100644
--- a/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
+++ b/chromium/third_party/dawn/src/include/dawn/dawn_wsi.h
@@ -15,7 +15,7 @@
#ifndef DAWN_DAWN_WSI_H_
#define DAWN_DAWN_WSI_H_
-#include <dawn/dawn.h>
+#include <dawn/webgpu.h>
// Error message (or nullptr if there was no error)
typedef const char* DawnSwapChainError;
@@ -40,8 +40,8 @@ typedef struct {
/// Configure/reconfigure the swap chain.
DawnSwapChainError (*Configure)(void* userData,
- DawnTextureFormat format,
- DawnTextureUsage allowedUsage,
+ WGPUTextureFormat format,
+ WGPUTextureUsage allowedUsage,
uint32_t width,
uint32_t height);
@@ -55,12 +55,12 @@ typedef struct {
void* userData;
/// For use by the D3D12 and Vulkan backends: how the swapchain will use the texture.
- DawnTextureUsage textureUsage;
+ WGPUTextureUsage textureUsage;
} DawnSwapChainImplementation;
#if defined(DAWN_ENABLE_BACKEND_D3D12) && defined(__cplusplus)
typedef struct {
- DawnDevice device = nullptr;
+ WGPUDevice device = nullptr;
} DawnWSIContextD3D12;
#endif
diff --git a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
index 5a87ab88607..de12d640fc8 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
@@ -24,15 +24,17 @@
struct ID3D12Device;
namespace dawn_native { namespace d3d12 {
- DAWN_NATIVE_EXPORT Microsoft::WRL::ComPtr<ID3D12Device> GetD3D12Device(DawnDevice device);
- DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(DawnDevice device,
+ DAWN_NATIVE_EXPORT Microsoft::WRL::ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device);
+ DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
HWND window);
- DAWN_NATIVE_EXPORT DawnTextureFormat
+ DAWN_NATIVE_EXPORT WGPUTextureFormat
GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
- DAWN_NATIVE_EXPORT DawnTexture WrapSharedHandle(DawnDevice device,
- const DawnTextureDescriptor* descriptor,
- HANDLE sharedHandle);
+ // Note: SharedHandle must be a handle to a texture object.
+ DAWN_NATIVE_EXPORT WGPUTexture WrapSharedHandle(WGPUDevice device,
+ const WGPUTextureDescriptor* descriptor,
+ HANDLE sharedHandle,
+ uint64_t acquireMutexKey);
}} // namespace dawn_native::d3d12
#endif // DAWNNATIVE_D3D12BACKEND_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
index b3125ed8f4e..b9a1d0eaee4 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
@@ -15,8 +15,8 @@
#ifndef DAWNNATIVE_DAWNNATIVE_H_
#define DAWNNATIVE_DAWNNATIVE_H_
-#include <dawn/dawn.h>
#include <dawn/dawn_proc_table.h>
+#include <dawn/webgpu.h>
#include <dawn_native/dawn_native_export.h>
#include <string>
@@ -90,13 +90,14 @@ namespace dawn_native {
DeviceType GetDeviceType() const;
const PCIInfo& GetPCIInfo() const;
std::vector<const char*> GetSupportedExtensions() const;
+ WGPUDeviceProperties GetAdapterProperties() const;
explicit operator bool() const;
// Create a device on this adapter, note that the interface will change to include at least
// a device descriptor and a pointer to backend specific options.
// On an error, nullptr is returned.
- DawnDevice CreateDevice(const DeviceDescriptor* deviceDescriptor = nullptr);
+ WGPUDevice CreateDevice(const DeviceDescriptor* deviceDescriptor = nullptr);
private:
AdapterBase* mImpl = nullptr;
@@ -156,10 +157,10 @@ namespace dawn_native {
DAWN_NATIVE_EXPORT DawnProcTable GetProcs();
// Query the names of all the toggles that are enabled in device
- DAWN_NATIVE_EXPORT std::vector<const char*> GetTogglesUsed(DawnDevice device);
+ DAWN_NATIVE_EXPORT std::vector<const char*> GetTogglesUsed(WGPUDevice device);
// Backdoor to get the number of lazy clears for testing
- DAWN_NATIVE_EXPORT size_t GetLazyClearCountForTesting(DawnDevice device);
+ DAWN_NATIVE_EXPORT size_t GetLazyClearCountForTesting(WGPUDevice device);
// Backdoor to get the order of the ProcMap for testing
DAWN_NATIVE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
diff --git a/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h b/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h
index 7588b978965..6e07c058240 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/MetalBackend.h
@@ -33,8 +33,8 @@ typedef __IOSurface* IOSurfaceRef;
#endif //__OBJC__
namespace dawn_native { namespace metal {
- DAWN_NATIVE_EXPORT DawnTexture WrapIOSurface(DawnDevice device,
- const DawnTextureDescriptor* descriptor,
+ DAWN_NATIVE_EXPORT WGPUTexture WrapIOSurface(WGPUDevice device,
+ const WGPUTextureDescriptor* descriptor,
IOSurfaceRef ioSurface,
uint32_t plane);
@@ -43,12 +43,12 @@ namespace dawn_native { namespace metal {
// does have a global queue of graphics operations, but the command buffers are inserted there
// when they are "scheduled". Submitting other operations before the command buffer is
// scheduled could lead to races in who gets scheduled first and incorrect rendering.
- DAWN_NATIVE_EXPORT void WaitForCommandsToBeScheduled(DawnDevice device);
+ DAWN_NATIVE_EXPORT void WaitForCommandsToBeScheduled(WGPUDevice device);
}} // namespace dawn_native::metal
#ifdef __OBJC__
namespace dawn_native { namespace metal {
- DAWN_NATIVE_EXPORT id<MTLDevice> GetMetalDevice(DawnDevice device);
+ DAWN_NATIVE_EXPORT id<MTLDevice> GetMetalDevice(WGPUDevice device);
}} // namespace dawn_native::metal
#endif // __OBJC__
diff --git a/chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h b/chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h
index 0ebbbc7a20e..05896716a3f 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/OpenGLBackend.h
@@ -28,8 +28,8 @@ namespace dawn_native { namespace opengl {
using PresentCallback = void (*)(void*);
DAWN_NATIVE_EXPORT DawnSwapChainImplementation
- CreateNativeSwapChainImpl(DawnDevice device, PresentCallback present, void* presentUserdata);
- DAWN_NATIVE_EXPORT DawnTextureFormat
+ CreateNativeSwapChainImpl(WGPUDevice device, PresentCallback present, void* presentUserdata);
+ DAWN_NATIVE_EXPORT WGPUTextureFormat
GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h b/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
index f8742591af0..30dbb05b848 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
@@ -24,42 +24,73 @@
namespace dawn_native { namespace vulkan {
+ // The different types of ExternalImageDescriptors
+ enum ExternalImageDescriptorType {
+#ifdef __linux__
+ OpaqueFD,
+ DmaBuf,
+#endif // __linux__
+ };
+
// Common properties of external images
- struct ExternalImageDescriptor {
- const DawnTextureDescriptor* cTextureDescriptor; // Must match image creation params
- bool isCleared; // Sets whether the texture will be cleared before use
- VkDeviceSize allocationSize; // Must match VkMemoryAllocateInfo from image creation
- uint32_t memoryTypeIndex; // Must match VkMemoryAllocateInfo from image creation
+ struct DAWN_NATIVE_EXPORT ExternalImageDescriptor {
+ public:
+ const ExternalImageDescriptorType type; // Must match the subclass
+ const WGPUTextureDescriptor* cTextureDescriptor; // Must match image creation params
+ bool isCleared; // Sets whether the texture will be cleared before use
+
+ protected:
+ ExternalImageDescriptor(ExternalImageDescriptorType type);
};
- DAWN_NATIVE_EXPORT VkInstance GetInstance(DawnDevice device);
+ DAWN_NATIVE_EXPORT VkInstance GetInstance(WGPUDevice device);
+
+ DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName);
- DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(DawnDevice device,
+ DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
VkSurfaceKHR surface);
- DAWN_NATIVE_EXPORT DawnTextureFormat
+ DAWN_NATIVE_EXPORT WGPUTextureFormat
GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
// Can't use DAWN_PLATFORM_LINUX since header included in both dawn and chrome
#ifdef __linux__
- // Descriptor for opaque file descriptor image import
- struct ExternalImageDescriptorOpaqueFD : ExternalImageDescriptor {
+ // Common properties of external images represented by FDs
+ struct DAWN_NATIVE_EXPORT ExternalImageDescriptorFD : ExternalImageDescriptor {
+ public:
int memoryFD; // A file descriptor from an export of the memory of the image
std::vector<int> waitFDs; // File descriptors of semaphores which will be waited on
+
+ protected:
+ ExternalImageDescriptorFD(ExternalImageDescriptorType type);
};
- // Imports an external vulkan image from an opaque file descriptor. Internally, this uses
- // external memory / semaphore extensions to import the image. Then, waits on the provided
- // |descriptor->waitFDs| before the texture can be used. Finally, a signal semaphore
- // can be exported, transferring control back to the caller.
- // On failure, returns a nullptr
- DAWN_NATIVE_EXPORT DawnTexture
- WrapVulkanImageOpaqueFD(DawnDevice cDevice,
- const ExternalImageDescriptorOpaqueFD* descriptor);
+ // Descriptor for opaque file descriptor image import
+ struct DAWN_NATIVE_EXPORT ExternalImageDescriptorOpaqueFD : ExternalImageDescriptorFD {
+ ExternalImageDescriptorOpaqueFD();
+
+ VkDeviceSize allocationSize; // Must match VkMemoryAllocateInfo from image creation
+ uint32_t memoryTypeIndex; // Must match VkMemoryAllocateInfo from image creation
+ };
+
+ // Descriptor for dma-buf file descriptor image import
+ struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDmaBuf : ExternalImageDescriptorFD {
+ ExternalImageDescriptorDmaBuf();
+
+ uint32_t stride; // Stride of the buffer in bytes
+ uint64_t drmModifier; // DRM modifier of the buffer
+ };
// Exports a signal semaphore from a wrapped texture. This must be called on wrapped
// textures before they are destroyed. On failure, returns -1
- DAWN_NATIVE_EXPORT int ExportSignalSemaphoreOpaqueFD(DawnDevice cDevice,
- DawnTexture cTexture);
+ DAWN_NATIVE_EXPORT int ExportSignalSemaphoreOpaqueFD(WGPUDevice cDevice,
+ WGPUTexture cTexture);
+
+ // Imports external memory into a Vulkan image. Internally, this uses external memory /
+ // semaphore extensions to import the image and wait on the provided synchronizaton
+ // primitives before the texture can be used.
+ // On failure, returns a nullptr.
+ DAWN_NATIVE_EXPORT WGPUTexture WrapVulkanImage(WGPUDevice cDevice,
+ const ExternalImageDescriptor* descriptor);
#endif // __linux__
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h b/chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h
index 7d3af270e0f..61b029e986b 100644
--- a/chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h
+++ b/chromium/third_party/dawn/src/include/dawn_platform/DawnPlatform.h
@@ -21,11 +21,18 @@
namespace dawn_platform {
+ enum class TraceCategory {
+ General, // General trace events
+ Validation, // Dawn validation
+ Recording, // Native command recording
+ GPUWork, // Actual GPU work
+ };
+
class DAWN_NATIVE_EXPORT Platform {
public:
virtual ~Platform() {
}
- virtual const unsigned char* GetTraceCategoryEnabledFlag(const char* name) = 0;
+ virtual const unsigned char* GetTraceCategoryEnabledFlag(TraceCategory category) = 0;
virtual double MonotonicallyIncreasingTime() = 0;
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/Wire.h b/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
index 7d60c31a5c2..6120f8b9ce6 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/Wire.h
@@ -17,7 +17,7 @@
#include <cstdint>
-#include "dawn/dawn.h"
+#include "dawn/webgpu.h"
#include "dawn_wire/dawn_wire_export.h"
namespace dawn_wire {
@@ -35,6 +35,16 @@ namespace dawn_wire {
virtual const volatile char* HandleCommands(const volatile char* commands, size_t size) = 0;
};
+ DAWN_WIRE_EXPORT size_t
+ SerializedWGPUDevicePropertiesSize(const WGPUDeviceProperties* deviceProperties);
+
+ DAWN_WIRE_EXPORT void SerializeWGPUDeviceProperties(
+ const WGPUDeviceProperties* deviceProperties,
+ char* serializeBuffer);
+
+ DAWN_WIRE_EXPORT bool DeserializeWGPUDeviceProperties(WGPUDeviceProperties* deviceProperties,
+ const volatile char* deserializeBuffer);
+
} // namespace dawn_wire
#endif // DAWNWIRE_WIRE_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h b/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
index 42b1aa6c6d6..376151e7e87 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
@@ -29,7 +29,7 @@ namespace dawn_wire {
}
struct ReservedTexture {
- DawnTexture texture;
+ WGPUTexture texture;
uint32_t id;
uint32_t generation;
};
@@ -44,12 +44,12 @@ namespace dawn_wire {
WireClient(const WireClientDescriptor& descriptor);
~WireClient();
- DawnDevice GetDevice() const;
+ WGPUDevice GetDevice() const;
DawnProcTable GetProcs() const;
const volatile char* HandleCommands(const volatile char* commands,
size_t size) override final;
- ReservedTexture ReserveTexture(DawnDevice device);
+ ReservedTexture ReserveTexture(WGPUDevice device);
private:
std::unique_ptr<client::Client> mImpl;
@@ -74,8 +74,8 @@ namespace dawn_wire {
// Imported memory implementation needs to override these to create Read/Write
// handles associated with a particular buffer. The client should receive a file
// descriptor for the buffer out-of-band.
- virtual ReadHandle* CreateReadHandle(DawnBuffer, uint64_t offset, size_t size);
- virtual WriteHandle* CreateWriteHandle(DawnBuffer, uint64_t offset, size_t size);
+ virtual ReadHandle* CreateReadHandle(WGPUBuffer, uint64_t offset, size_t size);
+ virtual WriteHandle* CreateWriteHandle(WGPUBuffer, uint64_t offset, size_t size);
class DAWN_WIRE_EXPORT ReadHandle {
public:
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h b/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
index e018b5bae47..f965a0469fd 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
@@ -29,7 +29,7 @@ namespace dawn_wire {
}
struct DAWN_WIRE_EXPORT WireServerDescriptor {
- DawnDevice device;
+ WGPUDevice device;
const DawnProcTable* procs;
CommandSerializer* serializer;
server::MemoryTransferService* memoryTransferService = nullptr;
@@ -43,7 +43,7 @@ namespace dawn_wire {
const volatile char* HandleCommands(const volatile char* commands,
size_t size) override final;
- bool InjectTexture(DawnTexture texture, uint32_t id, uint32_t generation);
+ bool InjectTexture(WGPUTexture texture, uint32_t id, uint32_t generation);
private:
std::unique_ptr<server::Server> mImpl;
diff --git a/chromium/third_party/dawn/src/utils/BackendBinding.cpp b/chromium/third_party/dawn/src/utils/BackendBinding.cpp
index 39ec8cfb4fe..54aa6078dc4 100644
--- a/chromium/third_party/dawn/src/utils/BackendBinding.cpp
+++ b/chromium/third_party/dawn/src/utils/BackendBinding.cpp
@@ -25,22 +25,22 @@
namespace utils {
#if defined(DAWN_ENABLE_BACKEND_D3D12)
- BackendBinding* CreateD3D12Binding(GLFWwindow* window, DawnDevice device);
+ BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device);
#endif
#if defined(DAWN_ENABLE_BACKEND_METAL)
- BackendBinding* CreateMetalBinding(GLFWwindow* window, DawnDevice device);
+ BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device);
#endif
#if defined(DAWN_ENABLE_BACKEND_NULL)
- BackendBinding* CreateNullBinding(GLFWwindow* window, DawnDevice device);
+ BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device);
#endif
#if defined(DAWN_ENABLE_BACKEND_OPENGL)
- BackendBinding* CreateOpenGLBinding(GLFWwindow* window, DawnDevice device);
+ BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device);
#endif
#if defined(DAWN_ENABLE_BACKEND_VULKAN)
- BackendBinding* CreateVulkanBinding(GLFWwindow* window, DawnDevice device);
+ BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device);
#endif
- BackendBinding::BackendBinding(GLFWwindow* window, DawnDevice device)
+ BackendBinding::BackendBinding(GLFWwindow* window, WGPUDevice device)
: mWindow(window), mDevice(device) {
}
@@ -75,7 +75,7 @@ namespace utils {
BackendBinding* CreateBinding(dawn_native::BackendType type,
GLFWwindow* window,
- DawnDevice device) {
+ WGPUDevice device) {
switch (type) {
#if defined(DAWN_ENABLE_BACKEND_D3D12)
case dawn_native::BackendType::D3D12:
diff --git a/chromium/third_party/dawn/src/utils/BackendBinding.h b/chromium/third_party/dawn/src/utils/BackendBinding.h
index 26b8a827121..f8d35b09d6f 100644
--- a/chromium/third_party/dawn/src/utils/BackendBinding.h
+++ b/chromium/third_party/dawn/src/utils/BackendBinding.h
@@ -15,7 +15,7 @@
#ifndef UTILS_BACKENDBINDING_H_
#define UTILS_BACKENDBINDING_H_
-#include "dawn/dawn.h"
+#include "dawn/webgpu.h"
#include "dawn_native/DawnNative.h"
struct GLFWwindow;
@@ -27,13 +27,13 @@ namespace utils {
virtual ~BackendBinding() = default;
virtual uint64_t GetSwapChainImplementation() = 0;
- virtual DawnTextureFormat GetPreferredSwapChainTextureFormat() = 0;
+ virtual WGPUTextureFormat GetPreferredSwapChainTextureFormat() = 0;
protected:
- BackendBinding(GLFWwindow* window, DawnDevice device);
+ BackendBinding(GLFWwindow* window, WGPUDevice device);
GLFWwindow* mWindow = nullptr;
- DawnDevice mDevice = nullptr;
+ WGPUDevice mDevice = nullptr;
};
void SetupGLFWWindowHintsForBackend(dawn_native::BackendType type);
@@ -42,7 +42,7 @@ namespace utils {
dawn_native::BackendType type);
BackendBinding* CreateBinding(dawn_native::BackendType type,
GLFWwindow* window,
- DawnDevice device);
+ WGPUDevice device);
} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.cpp b/chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.cpp
index 82427379ed1..8b076e11d9c 100644
--- a/chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.cpp
+++ b/chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.cpp
@@ -14,12 +14,12 @@
#include "utils/ComboRenderBundleEncoderDescriptor.h"
-#include "utils/DawnHelpers.h"
+#include "utils/WGPUHelpers.h"
namespace utils {
ComboRenderBundleEncoderDescriptor::ComboRenderBundleEncoderDescriptor() {
- dawn::RenderBundleEncoderDescriptor* descriptor = this;
+ wgpu::RenderBundleEncoderDescriptor* descriptor = this;
descriptor->colorFormatsCount = 0;
descriptor->colorFormats = &cColorFormats[0];
diff --git a/chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.h b/chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.h
index dbf833583d5..cd6044b59e9 100644
--- a/chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.h
+++ b/chromium/third_party/dawn/src/utils/ComboRenderBundleEncoderDescriptor.h
@@ -15,7 +15,7 @@
#ifndef UTILS_COMBORENDERBUNDLEENCODERDESCRIPTOR_H_
#define UTILS_COMBORENDERBUNDLEENCODERDESCRIPTOR_H_
-#include <dawn/dawncpp.h>
+#include <dawn/webgpu_cpp.h>
#include "common/Constants.h"
@@ -23,11 +23,11 @@
namespace utils {
- class ComboRenderBundleEncoderDescriptor : public dawn::RenderBundleEncoderDescriptor {
+ class ComboRenderBundleEncoderDescriptor : public wgpu::RenderBundleEncoderDescriptor {
public:
ComboRenderBundleEncoderDescriptor();
- std::array<dawn::TextureFormat, kMaxColorAttachments> cColorFormats;
+ std::array<wgpu::TextureFormat, kMaxColorAttachments> cColorFormats;
};
} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp
index 66be4dee0a4..5fa40c42663 100644
--- a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp
+++ b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.cpp
@@ -14,42 +14,43 @@
#include "utils/ComboRenderPipelineDescriptor.h"
-#include "utils/DawnHelpers.h"
+#include "utils/WGPUHelpers.h"
namespace utils {
- ComboVertexInputDescriptor::ComboVertexInputDescriptor() {
- dawn::VertexInputDescriptor* descriptor = this;
+ ComboVertexStateDescriptor::ComboVertexStateDescriptor() {
+ wgpu::VertexStateDescriptor* descriptor = this;
- descriptor->indexFormat = dawn::IndexFormat::Uint32;
- descriptor->bufferCount = 0;
+ descriptor->indexFormat = wgpu::IndexFormat::Uint32;
+ descriptor->vertexBufferCount = 0;
// Fill the default values for vertexBuffers and vertexAttributes in buffers.
- dawn::VertexAttributeDescriptor vertexAttribute;
+ wgpu::VertexAttributeDescriptor vertexAttribute;
vertexAttribute.shaderLocation = 0;
vertexAttribute.offset = 0;
- vertexAttribute.format = dawn::VertexFormat::Float;
+ vertexAttribute.format = wgpu::VertexFormat::Float;
for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
cAttributes[i] = vertexAttribute;
}
for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
- cBuffers[i].stride = 0;
- cBuffers[i].stepMode = dawn::InputStepMode::Vertex;
- cBuffers[i].attributeCount = 0;
- cBuffers[i].attributes = nullptr;
+ cVertexBuffers[i].arrayStride = 0;
+ cVertexBuffers[i].stepMode = wgpu::InputStepMode::Vertex;
+ cVertexBuffers[i].attributeCount = 0;
+ cVertexBuffers[i].attributes = nullptr;
}
- // cBuffers[i].attributes points to somewhere in cAttributes. cBuffers[0].attributes
- // points to &cAttributes[0] by default. Assuming cBuffers[0] has two attributes, then
- // cBuffers[1].attributes should point to &cAttributes[2]. Likewise, if cBuffers[1]
- // has 3 attributes, then cBuffers[2].attributes should point to &cAttributes[5].
- cBuffers[0].attributes = &cAttributes[0];
- descriptor->buffers = &cBuffers[0];
+ // cVertexBuffers[i].attributes points to somewhere in cAttributes.
+ // cVertexBuffers[0].attributes points to &cAttributes[0] by default. Assuming
+ // cVertexBuffers[0] has two attributes, then cVertexBuffers[1].attributes should point to
+ // &cAttributes[2]. Likewise, if cVertexBuffers[1] has 3 attributes, then
+ // cVertexBuffers[2].attributes should point to &cAttributes[5].
+ cVertexBuffers[0].attributes = &cAttributes[0];
+ descriptor->vertexBuffers = &cVertexBuffers[0];
}
- ComboRenderPipelineDescriptor::ComboRenderPipelineDescriptor(const dawn::Device& device) {
- dawn::RenderPipelineDescriptor* descriptor = this;
+ ComboRenderPipelineDescriptor::ComboRenderPipelineDescriptor(const wgpu::Device& device) {
+ wgpu::RenderPipelineDescriptor* descriptor = this;
- descriptor->primitiveTopology = dawn::PrimitiveTopology::TriangleList;
+ descriptor->primitiveTopology = wgpu::PrimitiveTopology::TriangleList;
descriptor->sampleCount = 1;
// Set defaults for the vertex stage descriptor.
@@ -62,12 +63,12 @@ namespace utils {
}
// Set defaults for the input state descriptors.
- descriptor->vertexInput = &cVertexInput;
+ descriptor->vertexState = &cVertexState;
// Set defaults for the rasterization state descriptor.
{
- cRasterizationState.frontFace = dawn::FrontFace::CCW;
- cRasterizationState.cullMode = dawn::CullMode::None;
+ cRasterizationState.frontFace = wgpu::FrontFace::CCW;
+ cRasterizationState.cullMode = wgpu::CullMode::None;
cRasterizationState.depthBias = 0;
cRasterizationState.depthBiasSlopeScale = 0.0;
@@ -80,15 +81,15 @@ namespace utils {
descriptor->colorStateCount = 1;
descriptor->colorStates = cColorStates.data();
- dawn::BlendDescriptor blend;
- blend.operation = dawn::BlendOperation::Add;
- blend.srcFactor = dawn::BlendFactor::One;
- blend.dstFactor = dawn::BlendFactor::Zero;
- dawn::ColorStateDescriptor colorStateDescriptor;
- colorStateDescriptor.format = dawn::TextureFormat::RGBA8Unorm;
+ wgpu::BlendDescriptor blend;
+ blend.operation = wgpu::BlendOperation::Add;
+ blend.srcFactor = wgpu::BlendFactor::One;
+ blend.dstFactor = wgpu::BlendFactor::Zero;
+ wgpu::ColorStateDescriptor colorStateDescriptor;
+ colorStateDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
colorStateDescriptor.alphaBlend = blend;
colorStateDescriptor.colorBlend = blend;
- colorStateDescriptor.writeMask = dawn::ColorWriteMask::All;
+ colorStateDescriptor.writeMask = wgpu::ColorWriteMask::All;
for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
cColorStates[i] = colorStateDescriptor;
}
@@ -96,23 +97,21 @@ namespace utils {
// Set defaults for the depth stencil state descriptors.
{
- dawn::StencilStateFaceDescriptor stencilFace;
- stencilFace.compare = dawn::CompareFunction::Always;
- stencilFace.failOp = dawn::StencilOperation::Keep;
- stencilFace.depthFailOp = dawn::StencilOperation::Keep;
- stencilFace.passOp = dawn::StencilOperation::Keep;
+ wgpu::StencilStateFaceDescriptor stencilFace;
+ stencilFace.compare = wgpu::CompareFunction::Always;
+ stencilFace.failOp = wgpu::StencilOperation::Keep;
+ stencilFace.depthFailOp = wgpu::StencilOperation::Keep;
+ stencilFace.passOp = wgpu::StencilOperation::Keep;
- cDepthStencilState.format = dawn::TextureFormat::Depth24PlusStencil8;
+ cDepthStencilState.format = wgpu::TextureFormat::Depth24PlusStencil8;
cDepthStencilState.depthWriteEnabled = false;
- cDepthStencilState.depthCompare = dawn::CompareFunction::Always;
+ cDepthStencilState.depthCompare = wgpu::CompareFunction::Always;
cDepthStencilState.stencilBack = stencilFace;
cDepthStencilState.stencilFront = stencilFace;
cDepthStencilState.stencilReadMask = 0xff;
cDepthStencilState.stencilWriteMask = 0xff;
descriptor->depthStencilState = nullptr;
}
-
- descriptor->layout = utils::MakeBasicPipelineLayout(device, nullptr);
}
} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h
index 2be2f0ac92d..067b79ef92c 100644
--- a/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h
+++ b/chromium/third_party/dawn/src/utils/ComboRenderPipelineDescriptor.h
@@ -15,7 +15,7 @@
#ifndef UTILS_COMBORENDERPIPELINEDESCRIPTOR_H_
#define UTILS_COMBORENDERPIPELINEDESCRIPTOR_H_
-#include <dawn/dawncpp.h>
+#include <dawn/webgpu_cpp.h>
#include "common/Constants.h"
@@ -23,29 +23,29 @@
namespace utils {
- class ComboVertexInputDescriptor : public dawn::VertexInputDescriptor {
+ class ComboVertexStateDescriptor : public wgpu::VertexStateDescriptor {
public:
- ComboVertexInputDescriptor();
+ ComboVertexStateDescriptor();
- std::array<dawn::VertexBufferDescriptor, kMaxVertexBuffers> cBuffers;
- std::array<dawn::VertexAttributeDescriptor, kMaxVertexAttributes> cAttributes;
+ std::array<wgpu::VertexBufferLayoutDescriptor, kMaxVertexBuffers> cVertexBuffers;
+ std::array<wgpu::VertexAttributeDescriptor, kMaxVertexAttributes> cAttributes;
};
- class ComboRenderPipelineDescriptor : public dawn::RenderPipelineDescriptor {
+ class ComboRenderPipelineDescriptor : public wgpu::RenderPipelineDescriptor {
public:
- ComboRenderPipelineDescriptor(const dawn::Device& device);
+ ComboRenderPipelineDescriptor(const wgpu::Device& device);
ComboRenderPipelineDescriptor(const ComboRenderPipelineDescriptor&) = delete;
ComboRenderPipelineDescriptor& operator=(const ComboRenderPipelineDescriptor&) = delete;
ComboRenderPipelineDescriptor(ComboRenderPipelineDescriptor&&) = delete;
ComboRenderPipelineDescriptor& operator=(ComboRenderPipelineDescriptor&&) = delete;
- dawn::ProgrammableStageDescriptor cFragmentStage;
+ wgpu::ProgrammableStageDescriptor cFragmentStage;
- ComboVertexInputDescriptor cVertexInput;
- dawn::RasterizationStateDescriptor cRasterizationState;
- std::array<dawn::ColorStateDescriptor, kMaxColorAttachments> cColorStates;
- dawn::DepthStencilStateDescriptor cDepthStencilState;
+ ComboVertexStateDescriptor cVertexState;
+ wgpu::RasterizationStateDescriptor cRasterizationState;
+ std::array<wgpu::ColorStateDescriptor, kMaxColorAttachments> cColorStates;
+ wgpu::DepthStencilStateDescriptor cDepthStencilState;
};
} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/D3D12Binding.cpp b/chromium/third_party/dawn/src/utils/D3D12Binding.cpp
index b2f48809b56..1708b147824 100644
--- a/chromium/third_party/dawn/src/utils/D3D12Binding.cpp
+++ b/chromium/third_party/dawn/src/utils/D3D12Binding.cpp
@@ -27,7 +27,7 @@ namespace utils {
class D3D12Binding : public BackendBinding {
public:
- D3D12Binding(GLFWwindow* window, DawnDevice device) : BackendBinding(window, device) {
+ D3D12Binding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
}
uint64_t GetSwapChainImplementation() override {
@@ -39,7 +39,7 @@ namespace utils {
return reinterpret_cast<uint64_t>(&mSwapchainImpl);
}
- DawnTextureFormat GetPreferredSwapChainTextureFormat() override {
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
ASSERT(mSwapchainImpl.userData != nullptr);
return dawn_native::d3d12::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
}
@@ -48,7 +48,7 @@ namespace utils {
DawnSwapChainImplementation mSwapchainImpl = {};
};
- BackendBinding* CreateD3D12Binding(GLFWwindow* window, DawnDevice device) {
+ BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device) {
return new D3D12Binding(window, device);
}
diff --git a/chromium/third_party/dawn/src/utils/MetalBinding.mm b/chromium/third_party/dawn/src/utils/MetalBinding.mm
index 0dbdb4d7381..d8875d54e2f 100644
--- a/chromium/third_party/dawn/src/utils/MetalBinding.mm
+++ b/chromium/third_party/dawn/src/utils/MetalBinding.mm
@@ -42,11 +42,11 @@ namespace utils {
mCommandQueue = ctx->queue;
}
- DawnSwapChainError Configure(DawnTextureFormat format,
- DawnTextureUsage usage,
+ DawnSwapChainError Configure(WGPUTextureFormat format,
+ WGPUTextureUsage usage,
uint32_t width,
uint32_t height) {
- if (format != DAWN_TEXTURE_FORMAT_BGRA8_UNORM) {
+ if (format != WGPUTextureFormat_BGRA8Unorm) {
return "unsupported format";
}
ASSERT(width > 0);
@@ -65,7 +65,7 @@ namespace utils {
[mLayer setDrawableSize:size];
constexpr uint32_t kFramebufferOnlyTextureUsages =
- DAWN_TEXTURE_USAGE_OUTPUT_ATTACHMENT | DAWN_TEXTURE_USAGE_PRESENT;
+ WGPUTextureUsage_OutputAttachment | WGPUTextureUsage_Present;
bool hasOnlyFramebufferUsages = !(usage & (~kFramebufferOnlyTextureUsages));
if (hasOnlyFramebufferUsages) {
[mLayer setFramebufferOnly:YES];
@@ -110,7 +110,7 @@ namespace utils {
class MetalBinding : public BackendBinding {
public:
- MetalBinding(GLFWwindow* window, DawnDevice device) : BackendBinding(window, device) {
+ MetalBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
}
uint64_t GetSwapChainImplementation() override {
@@ -121,15 +121,15 @@ namespace utils {
return reinterpret_cast<uint64_t>(&mSwapchainImpl);
}
- DawnTextureFormat GetPreferredSwapChainTextureFormat() override {
- return DAWN_TEXTURE_FORMAT_BGRA8_UNORM;
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+ return WGPUTextureFormat_BGRA8Unorm;
}
private:
DawnSwapChainImplementation mSwapchainImpl = {};
};
- BackendBinding* CreateMetalBinding(GLFWwindow* window, DawnDevice device) {
+ BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device) {
return new MetalBinding(window, device);
}
}
diff --git a/chromium/third_party/dawn/src/utils/NullBinding.cpp b/chromium/third_party/dawn/src/utils/NullBinding.cpp
index da268e5d786..f47b81c6745 100644
--- a/chromium/third_party/dawn/src/utils/NullBinding.cpp
+++ b/chromium/third_party/dawn/src/utils/NullBinding.cpp
@@ -23,7 +23,7 @@ namespace utils {
class NullBinding : public BackendBinding {
public:
- NullBinding(GLFWwindow* window, DawnDevice device) : BackendBinding(window, device) {
+ NullBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
}
uint64_t GetSwapChainImplementation() override {
@@ -32,15 +32,15 @@ namespace utils {
}
return reinterpret_cast<uint64_t>(&mSwapchainImpl);
}
- DawnTextureFormat GetPreferredSwapChainTextureFormat() override {
- return DAWN_TEXTURE_FORMAT_RGBA8_UNORM;
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+ return WGPUTextureFormat_RGBA8Unorm;
}
private:
DawnSwapChainImplementation mSwapchainImpl = {};
};
- BackendBinding* CreateNullBinding(GLFWwindow* window, DawnDevice device) {
+ BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device) {
return new NullBinding(window, device);
}
diff --git a/chromium/third_party/dawn/src/utils/OpenGLBinding.cpp b/chromium/third_party/dawn/src/utils/OpenGLBinding.cpp
index 4a20e5d4c3f..f48f426c7cd 100644
--- a/chromium/third_party/dawn/src/utils/OpenGLBinding.cpp
+++ b/chromium/third_party/dawn/src/utils/OpenGLBinding.cpp
@@ -27,7 +27,7 @@ namespace utils {
class OpenGLBinding : public BackendBinding {
public:
- OpenGLBinding(GLFWwindow* window, DawnDevice device) : BackendBinding(window, device) {
+ OpenGLBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
}
uint64_t GetSwapChainImplementation() override {
@@ -40,7 +40,7 @@ namespace utils {
return reinterpret_cast<uint64_t>(&mSwapchainImpl);
}
- DawnTextureFormat GetPreferredSwapChainTextureFormat() override {
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
return dawn_native::opengl::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
}
@@ -48,7 +48,7 @@ namespace utils {
DawnSwapChainImplementation mSwapchainImpl = {};
};
- BackendBinding* CreateOpenGLBinding(GLFWwindow* window, DawnDevice device) {
+ BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device) {
return new OpenGLBinding(window, device);
}
diff --git a/chromium/third_party/dawn/src/utils/TerribleCommandBuffer.cpp b/chromium/third_party/dawn/src/utils/TerribleCommandBuffer.cpp
index 77f86ec4b29..aa0bc8ca284 100644
--- a/chromium/third_party/dawn/src/utils/TerribleCommandBuffer.cpp
+++ b/chromium/third_party/dawn/src/utils/TerribleCommandBuffer.cpp
@@ -34,26 +34,71 @@ namespace utils {
// (Here and/or in the caller?) It might be good to make the wire receiver get a nullptr
// instead of pointer to zero-sized allocation in mBuffer.
+ // Cannot have commands in mBuffer and mLargeBuffer at same time.
+ ASSERT(mOffset == 0 || mLargeBufferCmdSize == 0);
+
if (size > sizeof(mBuffer)) {
- return nullptr;
+ // Flush current cmds in mBuffer to keep order.
+ if (mOffset > 0) {
+ if (!Flush()) {
+ return nullptr;
+ }
+ return GetCmdSpace(size);
+ }
+
+ // Resize large buffer to the size that can
+ // contain incoming command if needed.
+ if (mLargeBuffer.size() < size) {
+ mLargeBuffer.resize(size);
+ }
+
+ // Record whole cmd space.
+ mLargeBufferCmdSize = size;
+
+ return mLargeBuffer.data();
+ }
+
+ // Trigger flush if large buffer contain cmds.
+ if (mLargeBufferCmdSize > 0) {
+ if (!Flush()) {
+ return nullptr;
+ }
+ return GetCmdSpace(size);
}
+ // Need to flush large buffer first.
+ ASSERT(mLargeBufferCmdSize == 0);
+
char* result = &mBuffer[mOffset];
- mOffset += size;
- if (mOffset > sizeof(mBuffer)) {
+ if (sizeof(mBuffer) - size < mOffset) {
if (!Flush()) {
return nullptr;
}
return GetCmdSpace(size);
}
+ mOffset += size;
+
return result;
}
bool TerribleCommandBuffer::Flush() {
- bool success = mHandler->HandleCommands(mBuffer, mOffset) != nullptr;
+ // Cannot have commands in mBuffer and mLargeBuffer at same time.
+ ASSERT(mOffset == 0 || mLargeBufferCmdSize == 0);
+
+ bool success = false;
+ // Big buffer not empty, flush it!
+ if (mLargeBufferCmdSize > 0) {
+ success = mHandler->HandleCommands(mLargeBuffer.data(), mLargeBufferCmdSize) != nullptr;
+ // Clear big command buffers.
+ mLargeBufferCmdSize = 0;
+ return success;
+ }
+
+ success = mHandler->HandleCommands(mBuffer, mOffset) != nullptr;
mOffset = 0;
+
return success;
}
diff --git a/chromium/third_party/dawn/src/utils/TerribleCommandBuffer.h b/chromium/third_party/dawn/src/utils/TerribleCommandBuffer.h
index b5affc85530..9a41bb36112 100644
--- a/chromium/third_party/dawn/src/utils/TerribleCommandBuffer.h
+++ b/chromium/third_party/dawn/src/utils/TerribleCommandBuffer.h
@@ -34,7 +34,11 @@ namespace utils {
private:
dawn_wire::CommandHandler* mHandler = nullptr;
size_t mOffset = 0;
- char mBuffer[10000000];
+ // Cannot have commands in mBuffer and mLargeBuffer
+ // at the same time to ensure commands order.
+ char mBuffer[1000000];
+ std::vector<char> mLargeBuffer;
+ size_t mLargeBufferCmdSize = 0;
};
} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/VulkanBinding.cpp b/chromium/third_party/dawn/src/utils/VulkanBinding.cpp
index 61386a4eb12..577c3bce6b4 100644
--- a/chromium/third_party/dawn/src/utils/VulkanBinding.cpp
+++ b/chromium/third_party/dawn/src/utils/VulkanBinding.cpp
@@ -26,7 +26,7 @@ namespace utils {
class VulkanBinding : public BackendBinding {
public:
- VulkanBinding(GLFWwindow* window, DawnDevice device) : BackendBinding(window, device) {
+ VulkanBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
}
uint64_t GetSwapChainImplementation() override {
@@ -41,7 +41,7 @@ namespace utils {
}
return reinterpret_cast<uint64_t>(&mSwapchainImpl);
}
- DawnTextureFormat GetPreferredSwapChainTextureFormat() override {
+ WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
ASSERT(mSwapchainImpl.userData != nullptr);
return dawn_native::vulkan::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
}
@@ -50,7 +50,7 @@ namespace utils {
DawnSwapChainImplementation mSwapchainImpl = {};
};
- BackendBinding* CreateVulkanBinding(GLFWwindow* window, DawnDevice device) {
+ BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device) {
return new VulkanBinding(window, device);
}
diff --git a/chromium/third_party/dawn/src/utils/DawnHelpers.cpp b/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
index 1ad9adf09e7..dcf8a2baade 100644
--- a/chromium/third_party/dawn/src/utils/DawnHelpers.cpp
+++ b/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "utils/DawnHelpers.h"
+#include "utils/WGPUHelpers.h"
#include "common/Assert.h"
#include "common/Constants.h"
@@ -41,8 +41,8 @@ namespace utils {
}
}
- dawn::ShaderModule CreateShaderModuleFromResult(
- const dawn::Device& device,
+ wgpu::ShaderModule CreateShaderModuleFromResult(
+ const wgpu::Device& device,
const shaderc::SpvCompilationResult& result) {
// result.cend and result.cbegin return pointers to uint32_t.
const uint32_t* resultBegin = result.cbegin();
@@ -51,7 +51,7 @@ namespace utils {
ptrdiff_t resultSize = resultEnd - resultBegin;
// SetSource takes data as uint32_t*.
- dawn::ShaderModuleDescriptor descriptor;
+ wgpu::ShaderModuleDescriptor descriptor;
descriptor.codeSize = static_cast<uint32_t>(resultSize);
descriptor.code = result.cbegin();
return device.CreateShaderModule(&descriptor);
@@ -59,7 +59,7 @@ namespace utils {
} // anonymous namespace
- dawn::ShaderModule CreateShaderModule(const dawn::Device& device,
+ wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device,
SingleShaderStage stage,
const char* source) {
shaderc_shader_kind kind = ShadercShaderKind(stage);
@@ -102,7 +102,7 @@ namespace utils {
return CreateShaderModuleFromResult(device, result);
}
- dawn::ShaderModule CreateShaderModuleFromASM(const dawn::Device& device, const char* source) {
+ wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source) {
shaderc::Compiler compiler;
shaderc::SpvCompilationResult result = compiler.AssembleToSpv(source, strlen(source));
if (result.GetCompilationStatus() != shaderc_compilation_status_success) {
@@ -113,38 +113,38 @@ namespace utils {
return CreateShaderModuleFromResult(device, result);
}
- dawn::Buffer CreateBufferFromData(const dawn::Device& device,
+ wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
const void* data,
uint64_t size,
- dawn::BufferUsage usage) {
- dawn::BufferDescriptor descriptor;
+ wgpu::BufferUsage usage) {
+ wgpu::BufferDescriptor descriptor;
descriptor.size = size;
- descriptor.usage = usage | dawn::BufferUsage::CopyDst;
+ descriptor.usage = usage | wgpu::BufferUsage::CopyDst;
- dawn::Buffer buffer = device.CreateBuffer(&descriptor);
+ wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
buffer.SetSubData(0, size, data);
return buffer;
}
ComboRenderPassDescriptor::ComboRenderPassDescriptor(
- std::initializer_list<dawn::TextureView> colorAttachmentInfo,
- dawn::TextureView depthStencil) {
+ std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
+ wgpu::TextureView depthStencil) {
for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
- cColorAttachments[i].loadOp = dawn::LoadOp::Clear;
- cColorAttachments[i].storeOp = dawn::StoreOp::Store;
+ cColorAttachments[i].loadOp = wgpu::LoadOp::Clear;
+ cColorAttachments[i].storeOp = wgpu::StoreOp::Store;
cColorAttachments[i].clearColor = {0.0f, 0.0f, 0.0f, 0.0f};
}
cDepthStencilAttachmentInfo.clearDepth = 1.0f;
cDepthStencilAttachmentInfo.clearStencil = 0;
- cDepthStencilAttachmentInfo.depthLoadOp = dawn::LoadOp::Clear;
- cDepthStencilAttachmentInfo.depthStoreOp = dawn::StoreOp::Store;
- cDepthStencilAttachmentInfo.stencilLoadOp = dawn::LoadOp::Clear;
- cDepthStencilAttachmentInfo.stencilStoreOp = dawn::StoreOp::Store;
+ cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+ cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+ cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+ cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
colorAttachmentCount = static_cast<uint32_t>(colorAttachmentInfo.size());
uint32_t colorAttachmentIndex = 0;
- for (const dawn::TextureView& colorAttachment : colorAttachmentInfo) {
+ for (const wgpu::TextureView& colorAttachment : colorAttachmentInfo) {
if (colorAttachment.Get() != nullptr) {
cColorAttachments[colorAttachmentIndex].attachment = colorAttachment;
}
@@ -182,14 +182,14 @@ namespace utils {
: width(0),
height(0),
color(nullptr),
- colorFormat(dawn::TextureFormat::RGBA8Unorm),
+ colorFormat(wgpu::TextureFormat::RGBA8Unorm),
renderPassInfo({}) {
}
BasicRenderPass::BasicRenderPass(uint32_t texWidth,
uint32_t texHeight,
- dawn::Texture colorAttachment,
- dawn::TextureFormat textureFormat)
+ wgpu::Texture colorAttachment,
+ wgpu::TextureFormat textureFormat)
: width(texWidth),
height(texHeight),
color(colorAttachment),
@@ -197,13 +197,13 @@ namespace utils {
renderPassInfo({colorAttachment.CreateView()}) {
}
- BasicRenderPass CreateBasicRenderPass(const dawn::Device& device,
+ BasicRenderPass CreateBasicRenderPass(const wgpu::Device& device,
uint32_t width,
uint32_t height) {
DAWN_ASSERT(width > 0 && height > 0);
- dawn::TextureDescriptor descriptor;
- descriptor.dimension = dawn::TextureDimension::e2D;
+ wgpu::TextureDescriptor descriptor;
+ descriptor.dimension = wgpu::TextureDimension::e2D;
descriptor.size.width = width;
descriptor.size.height = height;
descriptor.size.depth = 1;
@@ -211,17 +211,17 @@ namespace utils {
descriptor.sampleCount = 1;
descriptor.format = BasicRenderPass::kDefaultColorFormat;
descriptor.mipLevelCount = 1;
- descriptor.usage = dawn::TextureUsage::OutputAttachment | dawn::TextureUsage::CopySrc;
- dawn::Texture color = device.CreateTexture(&descriptor);
+ descriptor.usage = wgpu::TextureUsage::OutputAttachment | wgpu::TextureUsage::CopySrc;
+ wgpu::Texture color = device.CreateTexture(&descriptor);
return BasicRenderPass(width, height, color);
}
- dawn::BufferCopyView CreateBufferCopyView(dawn::Buffer buffer,
+ wgpu::BufferCopyView CreateBufferCopyView(wgpu::Buffer buffer,
uint64_t offset,
uint32_t rowPitch,
uint32_t imageHeight) {
- dawn::BufferCopyView bufferCopyView;
+ wgpu::BufferCopyView bufferCopyView;
bufferCopyView.buffer = buffer;
bufferCopyView.offset = offset;
bufferCopyView.rowPitch = rowPitch;
@@ -230,11 +230,11 @@ namespace utils {
return bufferCopyView;
}
- dawn::TextureCopyView CreateTextureCopyView(dawn::Texture texture,
+ wgpu::TextureCopyView CreateTextureCopyView(wgpu::Texture texture,
uint32_t mipLevel,
uint32_t arrayLayer,
- dawn::Origin3D origin) {
- dawn::TextureCopyView textureCopyView;
+ wgpu::Origin3D origin) {
+ wgpu::TextureCopyView textureCopyView;
textureCopyView.texture = texture;
textureCopyView.mipLevel = mipLevel;
textureCopyView.arrayLayer = arrayLayer;
@@ -243,25 +243,25 @@ namespace utils {
return textureCopyView;
}
- dawn::SamplerDescriptor GetDefaultSamplerDescriptor() {
- dawn::SamplerDescriptor desc;
+ wgpu::SamplerDescriptor GetDefaultSamplerDescriptor() {
+ wgpu::SamplerDescriptor desc;
- desc.minFilter = dawn::FilterMode::Linear;
- desc.magFilter = dawn::FilterMode::Linear;
- desc.mipmapFilter = dawn::FilterMode::Linear;
- desc.addressModeU = dawn::AddressMode::Repeat;
- desc.addressModeV = dawn::AddressMode::Repeat;
- desc.addressModeW = dawn::AddressMode::Repeat;
+ desc.minFilter = wgpu::FilterMode::Linear;
+ desc.magFilter = wgpu::FilterMode::Linear;
+ desc.mipmapFilter = wgpu::FilterMode::Linear;
+ desc.addressModeU = wgpu::AddressMode::Repeat;
+ desc.addressModeV = wgpu::AddressMode::Repeat;
+ desc.addressModeW = wgpu::AddressMode::Repeat;
desc.lodMinClamp = kLodMin;
desc.lodMaxClamp = kLodMax;
- desc.compare = dawn::CompareFunction::Never;
+ desc.compare = wgpu::CompareFunction::Never;
return desc;
}
- dawn::PipelineLayout MakeBasicPipelineLayout(const dawn::Device& device,
- const dawn::BindGroupLayout* bindGroupLayout) {
- dawn::PipelineLayoutDescriptor descriptor;
+ wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
+ const wgpu::BindGroupLayout* bindGroupLayout) {
+ wgpu::PipelineLayoutDescriptor descriptor;
if (bindGroupLayout != nullptr) {
descriptor.bindGroupLayoutCount = 1;
descriptor.bindGroupLayouts = bindGroupLayout;
@@ -272,43 +272,43 @@ namespace utils {
return device.CreatePipelineLayout(&descriptor);
}
- dawn::BindGroupLayout MakeBindGroupLayout(
- const dawn::Device& device,
- std::initializer_list<dawn::BindGroupLayoutBinding> bindingsInitializer) {
- constexpr dawn::ShaderStage kNoStages{};
+ wgpu::BindGroupLayout MakeBindGroupLayout(
+ const wgpu::Device& device,
+ std::initializer_list<wgpu::BindGroupLayoutBinding> bindingsInitializer) {
+ constexpr wgpu::ShaderStage kNoStages{};
- std::vector<dawn::BindGroupLayoutBinding> bindings;
- for (const dawn::BindGroupLayoutBinding& binding : bindingsInitializer) {
+ std::vector<wgpu::BindGroupLayoutBinding> bindings;
+ for (const wgpu::BindGroupLayoutBinding& binding : bindingsInitializer) {
if (binding.visibility != kNoStages) {
bindings.push_back(binding);
}
}
- dawn::BindGroupLayoutDescriptor descriptor;
+ wgpu::BindGroupLayoutDescriptor descriptor;
descriptor.bindingCount = static_cast<uint32_t>(bindings.size());
descriptor.bindings = bindings.data();
return device.CreateBindGroupLayout(&descriptor);
}
BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
- const dawn::Sampler& sampler)
+ const wgpu::Sampler& sampler)
: binding(binding), sampler(sampler) {
}
BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
- const dawn::TextureView& textureView)
+ const wgpu::TextureView& textureView)
: binding(binding), textureView(textureView) {
}
BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
- const dawn::Buffer& buffer,
+ const wgpu::Buffer& buffer,
uint64_t offset,
uint64_t size)
: binding(binding), buffer(buffer), offset(offset), size(size) {
}
- dawn::BindGroupBinding BindingInitializationHelper::GetAsBinding() const {
- dawn::BindGroupBinding result;
+ wgpu::BindGroupBinding BindingInitializationHelper::GetAsBinding() const {
+ wgpu::BindGroupBinding result;
result.binding = binding;
result.sampler = sampler;
@@ -320,16 +320,16 @@ namespace utils {
return result;
}
- dawn::BindGroup MakeBindGroup(
- const dawn::Device& device,
- const dawn::BindGroupLayout& layout,
+ wgpu::BindGroup MakeBindGroup(
+ const wgpu::Device& device,
+ const wgpu::BindGroupLayout& layout,
std::initializer_list<BindingInitializationHelper> bindingsInitializer) {
- std::vector<dawn::BindGroupBinding> bindings;
+ std::vector<wgpu::BindGroupBinding> bindings;
for (const BindingInitializationHelper& helper : bindingsInitializer) {
bindings.push_back(helper.GetAsBinding());
}
- dawn::BindGroupDescriptor descriptor;
+ wgpu::BindGroupDescriptor descriptor;
descriptor.layout = layout;
descriptor.bindingCount = bindings.size();
descriptor.bindings = bindings.data();
diff --git a/chromium/third_party/dawn/src/utils/DawnHelpers.h b/chromium/third_party/dawn/src/utils/WGPUHelpers.h
index 337ae8508c5..c930d6a65a6 100644
--- a/chromium/third_party/dawn/src/utils/DawnHelpers.h
+++ b/chromium/third_party/dawn/src/utils/WGPUHelpers.h
@@ -15,7 +15,7 @@
#ifndef UTILS_DAWNHELPERS_H_
#define UTILS_DAWNHELPERS_H_
-#include <dawn/dawncpp.h>
+#include <dawn/webgpu_cpp.h>
#include <array>
#include <initializer_list>
@@ -28,42 +28,42 @@ namespace utils {
enum class SingleShaderStage { Vertex, Fragment, Compute };
- dawn::ShaderModule CreateShaderModule(const dawn::Device& device,
+ wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device,
SingleShaderStage stage,
const char* source);
- dawn::ShaderModule CreateShaderModuleFromASM(const dawn::Device& device, const char* source);
+ wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source);
- dawn::Buffer CreateBufferFromData(const dawn::Device& device,
+ wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
const void* data,
uint64_t size,
- dawn::BufferUsage usage);
+ wgpu::BufferUsage usage);
template <typename T>
- dawn::Buffer CreateBufferFromData(const dawn::Device& device,
- dawn::BufferUsage usage,
+ wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
+ wgpu::BufferUsage usage,
std::initializer_list<T> data) {
return CreateBufferFromData(device, data.begin(), uint32_t(sizeof(T) * data.size()), usage);
}
- dawn::BufferCopyView CreateBufferCopyView(dawn::Buffer buffer,
+ wgpu::BufferCopyView CreateBufferCopyView(wgpu::Buffer buffer,
uint64_t offset,
uint32_t rowPitch,
uint32_t imageHeight);
- dawn::TextureCopyView CreateTextureCopyView(dawn::Texture texture,
+ wgpu::TextureCopyView CreateTextureCopyView(wgpu::Texture texture,
uint32_t level,
uint32_t slice,
- dawn::Origin3D origin);
+ wgpu::Origin3D origin);
- struct ComboRenderPassDescriptor : public dawn::RenderPassDescriptor {
+ struct ComboRenderPassDescriptor : public wgpu::RenderPassDescriptor {
public:
- ComboRenderPassDescriptor(std::initializer_list<dawn::TextureView> colorAttachmentInfo,
- dawn::TextureView depthStencil = dawn::TextureView());
+ ComboRenderPassDescriptor(std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
+ wgpu::TextureView depthStencil = wgpu::TextureView());
const ComboRenderPassDescriptor& operator=(
const ComboRenderPassDescriptor& otherRenderPass);
- std::array<dawn::RenderPassColorAttachmentDescriptor, kMaxColorAttachments>
+ std::array<wgpu::RenderPassColorAttachmentDescriptor, kMaxColorAttachments>
cColorAttachments;
- dawn::RenderPassDepthStencilAttachmentDescriptor cDepthStencilAttachmentInfo;
+ wgpu::RenderPassDepthStencilAttachmentDescriptor cDepthStencilAttachmentInfo;
};
struct BasicRenderPass {
@@ -71,27 +71,27 @@ namespace utils {
BasicRenderPass();
BasicRenderPass(uint32_t width,
uint32_t height,
- dawn::Texture color,
- dawn::TextureFormat texture = kDefaultColorFormat);
+ wgpu::Texture color,
+ wgpu::TextureFormat texture = kDefaultColorFormat);
- static constexpr dawn::TextureFormat kDefaultColorFormat = dawn::TextureFormat::RGBA8Unorm;
+ static constexpr wgpu::TextureFormat kDefaultColorFormat = wgpu::TextureFormat::RGBA8Unorm;
uint32_t width;
uint32_t height;
- dawn::Texture color;
- dawn::TextureFormat colorFormat;
+ wgpu::Texture color;
+ wgpu::TextureFormat colorFormat;
utils::ComboRenderPassDescriptor renderPassInfo;
};
- BasicRenderPass CreateBasicRenderPass(const dawn::Device& device,
+ BasicRenderPass CreateBasicRenderPass(const wgpu::Device& device,
uint32_t width,
uint32_t height);
- dawn::SamplerDescriptor GetDefaultSamplerDescriptor();
- dawn::PipelineLayout MakeBasicPipelineLayout(const dawn::Device& device,
- const dawn::BindGroupLayout* bindGroupLayout);
- dawn::BindGroupLayout MakeBindGroupLayout(
- const dawn::Device& device,
- std::initializer_list<dawn::BindGroupLayoutBinding> bindingsInitializer);
+ wgpu::SamplerDescriptor GetDefaultSamplerDescriptor();
+ wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
+ const wgpu::BindGroupLayout* bindGroupLayout);
+ wgpu::BindGroupLayout MakeBindGroupLayout(
+ const wgpu::Device& device,
+ std::initializer_list<wgpu::BindGroupLayoutBinding> bindingsInitializer);
// Helpers to make creating bind groups look nicer:
//
@@ -104,26 +104,26 @@ namespace utils {
// Structure with one constructor per-type of bindings, so that the initializer_list accepts
// bindings with the right type and no extra information.
struct BindingInitializationHelper {
- BindingInitializationHelper(uint32_t binding, const dawn::Sampler& sampler);
- BindingInitializationHelper(uint32_t binding, const dawn::TextureView& textureView);
+ BindingInitializationHelper(uint32_t binding, const wgpu::Sampler& sampler);
+ BindingInitializationHelper(uint32_t binding, const wgpu::TextureView& textureView);
BindingInitializationHelper(uint32_t binding,
- const dawn::Buffer& buffer,
- uint64_t offset,
- uint64_t size);
+ const wgpu::Buffer& buffer,
+ uint64_t offset = 0,
+ uint64_t size = wgpu::kWholeSize);
- dawn::BindGroupBinding GetAsBinding() const;
+ wgpu::BindGroupBinding GetAsBinding() const;
uint32_t binding;
- dawn::Sampler sampler;
- dawn::TextureView textureView;
- dawn::Buffer buffer;
+ wgpu::Sampler sampler;
+ wgpu::TextureView textureView;
+ wgpu::Buffer buffer;
uint64_t offset = 0;
uint64_t size = 0;
};
- dawn::BindGroup MakeBindGroup(
- const dawn::Device& device,
- const dawn::BindGroupLayout& layout,
+ wgpu::BindGroup MakeBindGroup(
+ const wgpu::Device& device,
+ const wgpu::BindGroupLayout& layout,
std::initializer_list<BindingInitializationHelper> bindingsInitializer);
} // namespace utils