author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-29 10:46:47 +0100
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-11-02 12:02:10 +0000
commit     99677208ff3b216fdfec551fbe548da5520cd6fb (patch)
tree       476a4865c10320249360e859d8fdd3e01833b03a /chromium/third_party/dawn
parent     c30a6232df03e1efbd9f3b226777b07e087a1122 (diff)
download   qtwebengine-chromium-99677208ff3b216fdfec551fbe548da5520cd6fb.tar.gz
BASELINE: Update Chromium to 86.0.4240.124
Change-Id: Ide0ff151e94cd665ae6521a446995d34a9d1d644
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/third_party/dawn')
-rw-r--r--  chromium/third_party/dawn/DEPS | 94
-rw-r--r--  chromium/third_party/dawn/PRESUBMIT.py | 49
-rw-r--r--  chromium/third_party/dawn/dawn.json | 242
-rw-r--r--  chromium/third_party/dawn/dawn_wire.json | 50
-rw-r--r--  chromium/third_party/dawn/examples/Animometer.cpp | 5
-rw-r--r--  chromium/third_party/dawn/examples/BUILD.gn | 36
-rw-r--r--  chromium/third_party/dawn/examples/CHelloTriangle.cpp | 2
-rw-r--r--  chromium/third_party/dawn/examples/ComputeBoids.cpp | 17
-rw-r--r--  chromium/third_party/dawn/examples/CppHelloTriangle.cpp | 22
-rw-r--r--  chromium/third_party/dawn/examples/CubeReflection.cpp | 106
-rw-r--r--  chromium/third_party/dawn/examples/ManualSwapChainTest.cpp | 3
-rw-r--r--  chromium/third_party/dawn/examples/SampleUtils.cpp | 45
-rw-r--r--  chromium/third_party/dawn/generator/BUILD.gn | 20
-rw-r--r--  chromium/third_party/dawn/generator/dawn_json_generator.py | 380
-rw-r--r--  chromium/third_party/dawn/generator/generator_lib.gni | 12
-rw-r--r--  chromium/third_party/dawn/generator/generator_lib.py | 134
-rw-r--r--  chromium/third_party/dawn/generator/opengl_loader_generator.py | 87
-rw-r--r--  chromium/third_party/dawn/generator/remove_files.py | 36
-rw-r--r--  chromium/third_party/dawn/generator/templates/.clang-format | 2
-rw-r--r--  chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiObjects.h | 22
-rw-r--r--  chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiProcs.cpp | 18
-rw-r--r--  chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiProcs.h | 41
-rw-r--r--  chromium/third_party/dawn/generator/templates/mock_webgpu.cpp | 18
-rw-r--r--  chromium/third_party/dawn/generator/templates/mock_webgpu.h | 15
-rw-r--r--  chromium/third_party/dawn/generator/templates/webgpu.h | 2
-rw-r--r--  chromium/third_party/dawn/generator/templates/webgpu_cpp.cpp | 33
-rw-r--r--  chromium/third_party/dawn/generator/templates/webgpu_cpp.h | 5
-rw-r--r--  chromium/third_party/dawn/generator/templates/webgpu_struct_info.json | 5
-rw-r--r--  chromium/third_party/dawn/infra/config/PRESUBMIT.py | 6
-rw-r--r--  chromium/third_party/dawn/scripts/dawn_features.gni | 6
-rwxr-xr-x  chromium/third_party/dawn/scripts/git-clang-format | 579
-rwxr-xr-x  chromium/third_party/dawn/scripts/lint_clang_format.sh | 44
-rwxr-xr-x  chromium/third_party/dawn/scripts/perf_test_runner.py | 14
-rwxr-xr-x  chromium/third_party/dawn/scripts/roll-shader-deps.sh | 2
-rwxr-xr-x  chromium/third_party/dawn/scripts/travis_lint_format.sh | 27
-rw-r--r--  chromium/third_party/dawn/src/common/BUILD.gn | 15
-rw-r--r--  chromium/third_party/dawn/src/common/BitSetIterator.h | 2
-rw-r--r--  chromium/third_party/dawn/src/common/CMakeLists.txt | 3
-rw-r--r--  chromium/third_party/dawn/src/common/Constants.h | 28
-rw-r--r--  chromium/third_party/dawn/src/common/GPUInfo.cpp | 3
-rw-r--r--  chromium/third_party/dawn/src/common/GPUInfo.h | 3
-rw-r--r--  chromium/third_party/dawn/src/common/HashUtils.h | 2
-rw-r--r--  chromium/third_party/dawn/src/common/LinkedList.h | 2
-rw-r--r--  chromium/third_party/dawn/src/common/Math.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/common/Math.h | 16
-rw-r--r--  chromium/third_party/dawn/src/common/Platform.h | 6
-rw-r--r--  chromium/third_party/dawn/src/common/RefCounted.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/common/RefCounted.h | 8
-rw-r--r--  chromium/third_party/dawn/src/common/SerialStorage.h | 4
-rw-r--r--  chromium/third_party/dawn/src/common/StackContainer.h | 262
-rw-r--r--  chromium/third_party/dawn/src/common/SystemUtils.cpp | 5
-rw-r--r--  chromium/third_party/dawn/src/common/ityp_array.h | 8
-rw-r--r--  chromium/third_party/dawn/src/common/ityp_bitset.h | 2
-rw-r--r--  chromium/third_party/dawn/src/common/ityp_stack_vec.h | 103
-rw-r--r--  chromium/third_party/dawn/src/common/ityp_vector.h | 108
-rw-r--r--  chromium/third_party/dawn/src/common/vulkan_platform.h | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn/BUILD.gn | 24
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BUILD.gn | 16
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroup.cpp | 19
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h | 19
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp | 133
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h | 20
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindingInfo.cpp | 137
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/BindingInfo.h | 45
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Buffer.cpp | 339
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Buffer.h | 76
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CMakeLists.txt | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp | 33
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandAllocator.h | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp | 121
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandBuffer.h | 23
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp | 522
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandEncoder.h | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp | 246
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/CommandValidation.h | 33
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Commands.cpp | 24
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Commands.h | 19
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp | 19
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/DawnNative.cpp | 11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Device.cpp | 110
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Device.h | 14
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp | 22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/DynamicUploader.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/EnumClassBitmasks.h | 48
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/EnumMaskIterator.h | 80
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Format.cpp | 98
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Format.h | 32
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Instance.cpp | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Instance.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/MapRequestTracker.cpp | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/MapRequestTracker.h | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp | 32
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Pipeline.cpp | 22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp | 36
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.cpp | 60
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.h | 53
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/QuerySet.cpp | 16
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/QuerySet.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Queue.cpp | 157
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Queue.h | 30
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp | 19
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp | 18
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RenderPipeline.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ResourceHeap.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp | 174
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/ShaderModule.h | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/StagingBuffer.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/StagingBuffer.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Surface.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Texture.cpp | 133
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Texture.h | 53
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Toggles.cpp | 217
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/Toggles.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp | 12
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp | 50
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp | 201
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h | 31
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp | 346
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp | 52
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.h | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp | 88
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h | 36
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.cpp | 68
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.h | 45
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp | 87
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp | 61
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp | 54
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp | 125
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h | 9
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.cpp | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp | 84
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h | 29
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp | 268
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h | 1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp | 122
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h | 29
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.h | 23
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm | 98
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h | 27
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm | 343
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm | 83
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm | 87
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm | 26
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm | 82
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm | 106
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h | 32
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm | 143
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp | 54
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h | 23
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp | 1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp | 109
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.h | 16
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp | 166
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.cpp | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp | 1
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.h | 5
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp | 16
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp | 87
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp | 12
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp | 22
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp | 13
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp | 107
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h | 25
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp | 100
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocator.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp | 93
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.cpp | 10
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp | 112
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.h | 45
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp | 102
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp | 11
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp | 14
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.h | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp | 163
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h | 2
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp | 75
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h | 6
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp | 7
-rw-r--r--  chromium/third_party/dawn/src/dawn_platform/BUILD.gn | 4
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/BUILD.gn | 12
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt | 3
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/ApiObjects.h | 1
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp | 433
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp | 410
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Buffer.h | 47
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Client.cpp | 8
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp | 142
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Device.cpp | 43
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Device.h | 18
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Fence.cpp | 60
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Fence.h | 24
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp | 91
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/client/Queue.h | 43
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/Server.h | 21
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp | 203
-rw-r--r--  chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp | 17
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h | 7
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_native/DawnNative.h | 15
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h | 2
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_wire/WireClient.h | 2
-rw-r--r--  chromium/third_party/dawn/src/include/dawn_wire/WireServer.h | 2
-rw-r--r--  chromium/third_party/dawn/src/include/webgpu/webgpu.h | 1
-rw-r--r--  chromium/third_party/dawn/src/include/webgpu/webgpu_cpp.h | 1
-rw-r--r--  chromium/third_party/dawn/src/tests/BUILD.gn | 20
-rw-r--r--  chromium/third_party/dawn/src/utils/BUILD.gn | 33
-rw-r--r--  chromium/third_party/dawn/src/utils/CMakeLists.txt | 9
-rw-r--r--  chromium/third_party/dawn/src/utils/EmptyDebugLogger.cpp | 29
-rw-r--r--  chromium/third_party/dawn/src/utils/PlatformDebugLogger.h | 29
-rw-r--r--  chromium/third_party/dawn/src/utils/SystemUtils.h | 2
-rw-r--r--  chromium/third_party/dawn/src/utils/TestUtils.cpp | 107
-rw-r--r--  chromium/third_party/dawn/src/utils/TestUtils.h | 57
-rw-r--r--  chromium/third_party/dawn/src/utils/TextureFormatUtils.cpp | 141
-rw-r--r--  chromium/third_party/dawn/src/utils/TextureFormatUtils.h | 92
-rw-r--r--  chromium/third_party/dawn/src/utils/WGPUHelpers.cpp | 75
-rw-r--r--  chromium/third_party/dawn/src/utils/WGPUHelpers.h | 38
-rw-r--r--  chromium/third_party/dawn/src/utils/WindowsDebugLogger.cpp | 104
-rw-r--r--  chromium/third_party/dawn/third_party/.clang-format | 2
-rw-r--r--  chromium/third_party/dawn/third_party/gn/glfw/BUILD.gn | 2
-rw-r--r--  chromium/third_party/dawn/third_party/khronos/BUILD.gn | 4
270 files changed, 8680 insertions, 4295 deletions
diff --git a/chromium/third_party/dawn/DEPS b/chromium/third_party/dawn/DEPS
index 13c8d7911a7..e9550496188 100644
--- a/chromium/third_party/dawn/DEPS
+++ b/chromium/third_party/dawn/DEPS
@@ -1,5 +1,9 @@
use_relative_paths = True
-use_relative_hooks = True
+
+gclient_gn_args_file = 'build/config/gclient_args.gni'
+gclient_gn_args = [
+ 'mac_xcode_version',
+]
vars = {
'chromium_git': 'https://chromium.googlesource.com',
@@ -8,24 +12,25 @@ vars = {
'swiftshader_git': 'https://swiftshader.googlesource.com',
'dawn_standalone': True,
+
+ # This can be overridden, e.g. with custom_vars, to download a nonstandard
+ # Xcode version in build/mac_toolchain.py instead of downloading the
+ # prebuilt pinned revision.
+ 'mac_xcode_version': 'default',
}
deps = {
# Dependencies required to use GN/Clang in standalone
'build': {
- 'url': '{chromium_git}/chromium/src/build@896323eeda1bd1b01156b70625d5e14de225ebc3',
+ 'url': '{chromium_git}/chromium/src/build@b8f14c09b76ae3bd6edabe45105527a97e1e16bd',
'condition': 'dawn_standalone',
},
'buildtools': {
- 'url': '{chromium_git}/chromium/src/buildtools@2c41dfb19abe40908834803b6fed797b0f341fe1',
+ 'url': '{chromium_git}/chromium/src/buildtools@eb3987ec709b39469423100c1e77f0446890e059',
'condition': 'dawn_standalone',
},
'tools/clang': {
- 'url': '{chromium_git}/chromium/src/tools/clang@698732d5db36040c07d5cc5f9137fcc943494c11',
- 'condition': 'dawn_standalone',
- },
- 'third_party/binutils': {
- 'url': '{chromium_git}/chromium/src/third_party/binutils@f9ce777698a819dff4d6a033b31122d91a49b62e',
+ 'url': '{chromium_git}/chromium/src/tools/clang@d027d75e8dd91140115a4cc9c7c3598c44bbf634',
'condition': 'dawn_standalone',
},
'tools/clang/dsymutil': {
@@ -61,31 +66,31 @@ deps = {
# SPIRV-Cross
'third_party/spirv-cross': {
- 'url': '{chromium_git}/external/github.com/KhronosGroup/SPIRV-Cross@2e7a5625835380857e785716efd8b2720bf2c84a',
+ 'url': '{chromium_git}/external/github.com/KhronosGroup/SPIRV-Cross@4c7944bb4260ab0466817c932a9673b6cf59438e',
'condition': 'dawn_standalone',
},
# SPIRV compiler dependencies: SPIRV-Tools, SPIRV-headers, glslang and shaderc
'third_party/SPIRV-Tools': {
- 'url': '{chromium_git}/external/github.com/KhronosGroup/SPIRV-Tools@52a5f074e9bb6712487653cf360771e98a1ebe97',
+ 'url': '{chromium_git}/external/github.com/KhronosGroup/SPIRV-Tools@1023dd7a04be15064188d0e511e1708ef1c5af4a',
'condition': 'dawn_standalone',
},
'third_party/spirv-headers': {
- 'url': '{chromium_git}/external/github.com/KhronosGroup/SPIRV-Headers@11d7637e7a43cd88cfd4e42c99581dcb682936aa',
+ 'url': '{chromium_git}/external/github.com/KhronosGroup/SPIRV-Headers@3fdabd0da2932c276b25b9b4a988ba134eba1aa6',
'condition': 'dawn_standalone',
},
'third_party/glslang': {
- 'url': '{chromium_git}/external/github.com/KhronosGroup/glslang@839704450200e407490c538418f4d1a493b789ab',
+ 'url': '{chromium_git}/external/github.com/KhronosGroup/glslang@f257e0ea6b9aeab2dc7af3207ac6d29d2bbc01d0',
'condition': 'dawn_standalone',
},
'third_party/shaderc': {
- 'url': '{chromium_git}/external/github.com/google/shaderc@caa519ca532a6a3a0279509fce2ceb791c4f4651',
+ 'url': '{chromium_git}/external/github.com/google/shaderc@21b36f7368092216ecfaa017e95c383c2ed9db70',
'condition': 'dawn_standalone',
},
# WGSL support
'third_party/tint': {
- 'url': '{dawn_git}/tint@0c647a8896d127ba2526f18ed034f48bb35c878a',
+ 'url': '{dawn_git}/tint@b08e25388629f5a8f467b726ace18df529bba3ef',
'condition': 'dawn_standalone',
},
@@ -101,19 +106,13 @@ deps = {
'condition': 'dawn_standalone',
},
- # Our own pre-compiled Linux clang-format 7.0 for presubmit
- 'third_party/clang-format': {
- 'url': '{dawn_git}/clang-format@2451c56cd368676cdb230fd5ad11731ab859f1a3',
- 'condition': 'dawn_standalone and checkout_linux',
- },
-
# Khronos Vulkan headers, validation layers and loader.
'third_party/vulkan-headers': {
- 'url': '{chromium_git}/external/github.com/KhronosGroup/Vulkan-Headers@09531f27933bf04bffde9074acb302e026e8f181',
+ 'url': '{chromium_git}/external/github.com/KhronosGroup/Vulkan-Headers@4c079bf40c2587220dbf157d825d3185c9adc896',
'condition': 'dawn_standalone',
},
'third_party/vulkan-validation-layers': {
- 'url': '{chromium_git}/external/github.com/KhronosGroup/Vulkan-ValidationLayers@31bdb2d4376919f5964dd8314dd2192810f08b36',
+ 'url': '{chromium_git}/external/github.com/KhronosGroup/Vulkan-ValidationLayers@e8b96e86fe2edfaee274b98fbbe1bd65579b0904',
'condition': 'dawn_standalone',
},
'third_party/vulkan-loader': {
@@ -122,51 +121,13 @@ deps = {
},
'third_party/swiftshader': {
- 'url': '{swiftshader_git}/SwiftShader@d25ce872522427bd7a957acf7126dc5aae9d26fc',
+ 'url': '{swiftshader_git}/SwiftShader@e8dd233c7a85f3c689caf06c226a7f8405a480d3',
'condition': 'dawn_standalone',
},
}
hooks = [
- # Pull clang-format binaries using checked-in hashes.
- {
- 'name': 'clang_format_win',
- 'pattern': '.',
- 'condition': 'host_os == "win" and dawn_standalone',
- 'action': [ 'download_from_google_storage',
- '--no_resume',
- '--platform=win32',
- '--no_auth',
- '--bucket', 'chromium-clang-format',
- '-s', 'buildtools/win/clang-format.exe.sha1',
- ],
- },
- {
- 'name': 'clang_format_mac',
- 'pattern': '.',
- 'condition': 'host_os == "mac" and dawn_standalone',
- 'action': [ 'download_from_google_storage',
- '--no_resume',
- '--platform=darwin',
- '--no_auth',
- '--bucket', 'chromium-clang-format',
- '-s', 'buildtools/mac/clang-format.sha1',
- ],
- },
- {
- 'name': 'clang_format_linux',
- 'pattern': '.',
- 'condition': 'host_os == "linux" and dawn_standalone',
- 'action': [ 'download_from_google_storage',
- '--no_resume',
- '--platform=linux*',
- '--no_auth',
- '--bucket', 'chromium-clang-format',
- '-s', 'buildtools/linux64/clang-format.sha1',
- ],
- },
-
# Pull the compilers and system libraries for hermetic builds
{
'name': 'sysroot_x86',
@@ -208,20 +169,11 @@ hooks = [
'-s', 'build/toolchain/win/rc/win/rc.exe.sha1',
],
},
- # Pull binutils for linux hermetic builds
- {
- 'name': 'binutils',
- 'pattern': 'src/third_party/binutils',
- 'condition': 'host_os == "linux" and dawn_standalone',
- 'action': [
- 'python',
- 'third_party/binutils/download.py',
- ],
- },
# Update build/util/LASTCHANGE.
{
'name': 'lastchange',
'pattern': '.',
+ 'condition': 'dawn_standalone',
'action': ['python', 'build/util/lastchange.py',
'-o', 'build/util/LASTCHANGE'],
},
diff --git a/chromium/third_party/dawn/PRESUBMIT.py b/chromium/third_party/dawn/PRESUBMIT.py
index ae10063e7d5..4ad052cb994 100644
--- a/chromium/third_party/dawn/PRESUBMIT.py
+++ b/chromium/third_party/dawn/PRESUBMIT.py
@@ -16,54 +16,21 @@ import os
import platform
import subprocess
-def _DoClangFormat(input_api, output_api):
- # Our binary clang-format is a linux binary compiled for x64
- if platform.system() != 'Linux' or platform.architecture()[0] != '64bit':
- return [output_api.PresubmitNotifyResult('Skipping clang-format')]
-
- # We need to know which commit to diff against. It doesn't seem to be exposed anywhere
- # except in that private member of presubmit_support.Change. This is likely to break
- # but hopefully we have an updated clang-format in CPID/GS before it does.
- upstream_commit = input_api.change._upstream
- if upstream_commit == None:
- return []
-
- lint_cmd = [
- 'scripts/lint_clang_format.sh',
- 'third_party/clang-format/clang-format',
- upstream_commit
- ]
-
- # Make clang-format use our linux x64 sysroot because it is compiled with a version of
- # stdlibc++ that's incompatible with the old libraries present on the bots.
- env = {
- 'LD_LIBRARY_PATH': os.path.join(
- os.getcwd(),
- 'build',
- 'linux',
- 'debian_sid_amd64-sysroot',
- 'usr',
- 'lib',
- 'x86_64-linux-gnu'
- )
- }
-
- # Call the linting script and forward the output as a notification or as an error
- try:
- output = subprocess.check_output(lint_cmd, env=env);
- return [output_api.PresubmitNotifyResult(output)]
- except subprocess.CalledProcessError as e:
- return [output_api.PresubmitError(e.output)]
def _DoCommonChecks(input_api, output_api):
results = []
- results.extend(input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api))
- results.extend(input_api.canned_checks.CheckGNFormatted(input_api, output_api))
- results.extend(_DoClangFormat(input_api, output_api))
+ results.extend(
+ input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api))
+ results.extend(
+ input_api.canned_checks.CheckPatchFormatted(input_api,
+ output_api,
+ check_python=True))
return results
+
def CheckChangeOnUpload(input_api, output_api):
return _DoCommonChecks(input_api, output_api)
+
def CheckChangeOnCommit(input_api, output_api):
return _DoCommonChecks(input_api, output_api)
diff --git a/chromium/third_party/dawn/dawn.json b/chromium/third_party/dawn/dawn.json
index f313dcf851e..791763c92d8 100644
--- a/chromium/third_party/dawn/dawn.json
+++ b/chromium/third_party/dawn/dawn.json
@@ -92,11 +92,11 @@
{"name": "visibility", "type": "shader stage"},
{"name": "type", "type": "binding type"},
{"name": "has dynamic offset", "type": "bool", "default": "false"},
+ {"name": "min buffer binding size", "type": "uint64_t", "default": "0"},
{"name": "multisampled", "type": "bool", "default": "false"},
{"name": "view dimension", "type": "texture view dimension", "default": "undefined"},
{"name": "texture component type", "type": "texture component type", "default": "float"},
- {"name": "storage texture format", "type": "texture format", "default": "undefined"},
- {"name": "min buffer binding size", "type": "uint64_t", "default": "0"}
+ {"name": "storage texture format", "type": "texture format", "default": "undefined"}
]
},
"bind group layout descriptor": {
@@ -117,9 +117,9 @@
{"value": 3, "name": "sampler"},
{"value": 4, "name": "comparison sampler"},
{"value": 5, "name": "sampled texture"},
- {"value": 6, "name": "storage texture"},
- {"value": 7, "name": "readonly storage texture"},
- {"value": 8, "name": "writeonly storage texture"}
+ {"value": 6, "name": "readonly storage texture"},
+ {"value": 7, "name": "writeonly storage texture"},
+ {"value": 8, "name": "storage texture"}
]
},
"blend descriptor": {
@@ -176,14 +176,6 @@
"category": "object",
"methods": [
{
- "name": "set sub data",
- "args": [
- {"name": "start", "type": "uint64_t"},
- {"name": "count", "type": "uint64_t"},
- {"name": "data", "type": "void", "annotation": "const*", "length": "count"}
- ]
- },
- {
"name": "map read async",
"args": [
{"name": "callback", "type": "buffer map read callback"},
@@ -198,6 +190,32 @@
]
},
{
+ "name": "map async",
+ "args": [
+ {"name": "mode", "type": "map mode"},
+ {"name": "offset", "type": "size_t"},
+ {"name": "size", "type": "size_t"},
+ {"name": "callback", "type": "buffer map callback"},
+ {"name": "userdata", "type": "void", "annotation": "*"}
+ ]
+ },
+ {
+ "name": "get mapped range",
+ "returns": "void *",
+ "args": [
+ {"name": "offset", "type": "size_t", "default": 0},
+ {"name": "size", "type": "size_t", "default": 0}
+ ]
+ },
+ {
+ "name": "get const mapped range",
+ "returns": "void const *",
+ "args": [
+ {"name": "offset", "type": "size_t", "default": 0},
+ {"name": "size", "type": "size_t", "default": 0}
+ ]
+ },
+ {
"name": "unmap"
},
{
@@ -209,10 +227,8 @@
"category": "structure",
"extensible": true,
"members": [
- {"name": "buffer", "type": "buffer"},
- {"name": "offset", "type": "uint64_t", "default": 0},
- {"name": "bytes per row", "type": "uint32_t"},
- {"name": "rows per image", "type": "uint32_t", "default": 0}
+ {"name": "layout", "type": "texture data layout"},
+ {"name": "buffer", "type": "buffer"}
]
},
"buffer descriptor": {
@@ -221,7 +237,15 @@
"members": [
{"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
{"name": "usage", "type": "buffer usage"},
- {"name": "size", "type": "uint64_t"}
+ {"name": "size", "type": "uint64_t"},
+ {"name": "mapped at creation", "type": "bool", "default": "false"}
+ ]
+ },
+ "buffer map callback": {
+ "category": "callback",
+ "args": [
+ {"name": "status", "type": "buffer map async status"},
+ {"name": "userdata", "type": "void", "annotation": "*"}
]
},
"buffer map read callback": {
@@ -263,7 +287,8 @@
{"value": 32, "name": "vertex"},
{"value": 64, "name": "uniform"},
{"value": 128, "name": "storage"},
- {"value": 256, "name": "indirect"}
+ {"value": 256, "name": "indirect"},
+ {"value": 512, "name": "query resolve"}
]
},
"char": {
@@ -371,7 +396,7 @@
{
"name": "insert debug marker",
"args": [
- {"name": "group label", "type": "char", "annotation": "const*", "length": "strlen"}
+ {"name": "marker label", "type": "char", "annotation": "const*", "length": "strlen"}
]
},
{
@@ -383,6 +408,23 @@
"args": [
{"name": "group label", "type": "char", "annotation": "const*", "length": "strlen"}
]
+ },
+ {
+ "name": "resolve query set",
+ "args": [
+ {"name": "query set", "type": "query set"},
+ {"name": "first query", "type": "uint32_t"},
+ {"name": "query count", "type": "uint32_t"},
+ {"name": "destination", "type": "buffer"},
+ {"name": "destination offset", "type": "uint64_t"}
+ ]
+ },
+ {
+ "name": "write timestamp",
+ "args": [
+ {"name": "query set", "type": "query set"},
+ {"name": "query index", "type": "uint32_t"}
+ ]
}
]
},
@@ -396,7 +438,7 @@
"compare function": {
"category": "enum",
"values": [
- {"value": 0, "name": "undefined"},
+ {"value": 0, "name": "undefined", "jsrepr": "undefined"},
{"value": 1, "name": "never"},
{"value": 2, "name": "less"},
{"value": 3, "name": "less equal"},
@@ -420,7 +462,7 @@
{
"name": "insert debug marker",
"args": [
- {"name": "group label", "type": "char", "annotation": "const*", "length": "strlen"}
+ {"name": "marker label", "type": "char", "annotation": "const*", "length": "strlen"}
]
},
{
@@ -449,6 +491,13 @@
]
},
{
+ "name": "write timestamp",
+ "args": [
+ {"name": "query set", "type": "query set"},
+ {"name": "query index", "type": "uint32_t"}
+ ]
+ },
+ {
"name": "dispatch",
"args": [
{"name": "x", "type": "uint32_t"},
@@ -548,17 +597,17 @@
]
},
{
- "name": "create render pipeline",
- "returns": "render pipeline",
+ "name": "create pipeline layout",
+ "returns": "pipeline layout",
"args": [
- {"name": "descriptor", "type": "render pipeline descriptor", "annotation": "const*"}
+ {"name": "descriptor", "type": "pipeline layout descriptor", "annotation": "const*"}
]
},
{
- "name": "create pipeline layout",
- "returns": "pipeline layout",
+ "name": "create query set",
+ "returns": "query set",
"args": [
- {"name": "descriptor", "type": "pipeline layout descriptor", "annotation": "const*"}
+ {"name": "descriptor", "type": "query set descriptor", "annotation": "const*"}
]
},
{
@@ -569,6 +618,13 @@
]
},
{
+ "name": "create render pipeline",
+ "returns": "render pipeline",
+ "args": [
+ {"name": "descriptor", "type": "render pipeline descriptor", "annotation": "const*"}
+ ]
+ },
+ {
"name": "create sampler",
"returns": "sampler",
"args": [
@@ -642,13 +698,6 @@
{"name": "callback", "type": "error callback"},
{"name": "userdata", "type": "void", "annotation": "*"}
]
- },
- {
- "name": "create query set",
- "returns": "query set",
- "args": [
- {"name": "descriptor", "type": "query set descriptor", "annotation": "const*"}
- ]
}
]
},
@@ -840,6 +889,14 @@
{"value": 1, "name": "load"}
]
},
+ "map mode": {
+ "category": "bitmask",
+ "values": [
+ {"value": 0, "name": "none"},
+ {"value": 1, "name": "read"},
+ {"value": 2, "name": "write"}
+ ]
+ },
"store op": {
"category": "enum",
"values": [
@@ -867,7 +924,7 @@
{"name": "bind group layouts", "type": "bind group layout", "annotation": "const*", "length": "bind group layout count"}
]
},
- "pipeline statistics name": {
+ "pipeline statistic name": {
"category": "enum",
"values": [
{"value": 0, "name": "vertex shader invocations"},
@@ -918,8 +975,8 @@
{"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
{"name": "type", "type": "query type"},
{"name": "count", "type": "uint32_t"},
- {"name": "pipeline statistics count", "type": "uint32_t", "default": "0"},
- {"name": "pipeline statistics", "type": "pipeline statistics name", "annotation": "const*", "length": "pipeline statistics count"}
+ {"name": "pipeline statistics", "type": "pipeline statistic name", "annotation": "const*", "length": "pipeline statistics count"},
+ {"name": "pipeline statistics count", "type": "uint32_t", "default": "0"}
]
},
"query type": {
@@ -962,6 +1019,16 @@
{"name": "data", "type": "void", "annotation": "const*", "length": "size"},
{"name": "size", "type": "size_t"}
]
+ },
+ {
+ "name": "write texture",
+ "args": [
+ {"name": "destination", "type": "texture copy view", "annotation": "const*"},
+ {"name": "data", "type": "void", "annotation": "const*", "length": "data size"},
+ {"name": "data size", "type": "size_t"},
+ {"name": "data layout", "type": "texture data layout", "annotation": "const*"},
+ {"name": "write size", "type": "extent 3D", "annotation": "const*"}
+ ]
}
]
},
@@ -1036,7 +1103,7 @@
{
"name": "insert debug marker",
"args": [
- {"name": "group label", "type": "char", "annotation": "const*", "length": "strlen"}
+ {"name": "marker label", "type": "char", "annotation": "const*", "length": "strlen"}
]
},
{
@@ -1114,9 +1181,11 @@
{"name": "depth load op", "type": "load op"},
{"name": "depth store op", "type": "store op"},
{"name": "clear depth", "type": "float"},
+ {"name": "depth read only", "type": "bool", "default": "false"},
{"name": "stencil load op", "type": "load op"},
{"name": "stencil store op", "type": "store op"},
- {"name": "clear stencil", "type": "uint32_t", "default": "0"}
+ {"name": "clear stencil", "type": "uint32_t", "default": "0"},
+ {"name": "stencil read only", "type": "bool", "default": "false"}
]
},
@@ -1127,7 +1196,8 @@
{"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true},
{"name": "color attachment count", "type": "uint32_t"},
{"name": "color attachments", "type": "render pass color attachment descriptor", "annotation": "const*", "length": "color attachment count"},
- {"name": "depth stencil attachment", "type": "render pass depth stencil attachment descriptor", "annotation": "const*", "optional": true}
+ {"name": "depth stencil attachment", "type": "render pass depth stencil attachment descriptor", "annotation": "const*", "optional": true},
+ {"name": "occlusion query set", "type": "query set", "optional": true}
]
},
"render pass encoder": {
@@ -1191,7 +1261,7 @@
{
"name": "insert debug marker",
"args": [
- {"name": "group label", "type": "char", "annotation": "const*", "length": "strlen"}
+ {"name": "marker label", "type": "char", "annotation": "const*", "length": "strlen"}
]
},
{
@@ -1254,6 +1324,13 @@
]
},
{
+ "name": "write timestamp",
+ "args": [
+ {"name": "query set", "type": "query set"},
+ {"name": "query index", "type": "uint32_t"}
+ ]
+ },
+ {
"name": "end pass"
}
]
@@ -1389,11 +1466,11 @@
{"name": "label", "type": "char", "annotation": "const*", "length": "strlen", "optional": true}
]
},
- "surface descriptor from HTML canvas id": {
+ "surface descriptor from canvas HTML selector": {
"category": "structure",
"chained": true,
"members": [
- {"name": "id", "type": "char", "annotation": "const*", "length": "strlen"}
+ {"name": "selector", "type": "char", "annotation": "const*", "length": "strlen"}
]
},
"surface descriptor from metal layer": {
@@ -1459,7 +1536,7 @@
{"value": 1, "name": "surface descriptor from metal layer"},
{"value": 2, "name": "surface descriptor from windows HWND"},
{"value": 3, "name": "surface descriptor from xlib"},
- {"value": 4, "name": "surface descriptor from HTML canvas id"},
+ {"value": 4, "name": "surface descriptor from canvas HTML selector"},
{"value": 5, "name": "shader module SPIRV descriptor"},
{"value": 6, "name": "shader module WGSL descriptor"},
{"value": 7, "name": "sampler descriptor dummy anisotropic filtering"},
@@ -1503,8 +1580,17 @@
"members": [
{"name": "texture", "type": "texture"},
{"name": "mip level", "type": "uint32_t", "default": "0"},
- {"name": "array layer", "type": "uint32_t", "default": "0"},
- {"name": "origin", "type": "origin 3D"}
+ {"name": "origin", "type": "origin 3D"},
+ {"name": "aspect", "type": "texture aspect", "default": "all"}
+ ]
+ },
+ "texture data layout": {
+ "category": "structure",
+ "extensible": true,
+ "members": [
+ {"name": "offset", "type": "uint64_t", "default": 0},
+ {"name": "bytes per row", "type": "uint32_t"},
+ {"name": "rows per image", "type": "uint32_t", "default": 0}
]
},
"texture descriptor": {
@@ -1515,7 +1601,6 @@
{"name": "usage", "type": "texture usage"},
{"name": "dimension", "type": "texture dimension", "default": "2D"},
{"name": "size", "type": "extent 3D"},
- {"name": "array layer count", "type": "uint32_t", "default": "1"},
{"name": "format", "type": "texture format"},
{"name": "mip level count", "type": "uint32_t", "default": 1},
{"name": "sample count", "type": "uint32_t", "default": 1}
@@ -1560,37 +1645,38 @@
{"value": 23, "name": "BGRA8 unorm"},
{"value": 24, "name": "BGRA8 unorm srgb"},
{"value": 25, "name": "RGB10 A2 unorm"},
- {"value": 26, "name": "RG11 B10 float"},
+ {"value": 26, "name": "RG11 B10 ufloat"},
+ {"value": 27, "name": "RGB9 E5 ufloat"},
- {"value": 27, "name": "RG32 float"},
- {"value": 28, "name": "RG32 uint"},
- {"value": 29, "name": "RG32 sint"},
- {"value": 30, "name": "RGBA16 uint"},
- {"value": 31, "name": "RGBA16 sint"},
- {"value": 32, "name": "RGBA16 float"},
+ {"value": 28, "name": "RG32 float"},
+ {"value": 29, "name": "RG32 uint"},
+ {"value": 30, "name": "RG32 sint"},
+ {"value": 31, "name": "RGBA16 uint"},
+ {"value": 32, "name": "RGBA16 sint"},
+ {"value": 33, "name": "RGBA16 float"},
- {"value": 33, "name": "RGBA32 float"},
- {"value": 34, "name": "RGBA32 uint"},
- {"value": 35, "name": "RGBA32 sint"},
+ {"value": 34, "name": "RGBA32 float"},
+ {"value": 35, "name": "RGBA32 uint"},
+ {"value": 36, "name": "RGBA32 sint"},
- {"value": 36, "name": "depth32 float"},
- {"value": 37, "name": "depth24 plus"},
- {"value": 38, "name": "depth24 plus stencil8"},
+ {"value": 37, "name": "depth32 float"},
+ {"value": 38, "name": "depth24 plus"},
+ {"value": 39, "name": "depth24 plus stencil8"},
- {"value": 39, "name": "BC1 RGBA unorm"},
- {"value": 40, "name": "BC1 RGBA unorm srgb"},
- {"value": 41, "name": "BC2 RGBA unorm"},
- {"value": 42, "name": "BC2 RGBA unorm srgb"},
- {"value": 43, "name": "BC3 RGBA unorm"},
- {"value": 44, "name": "BC3 RGBA unorm srgb"},
- {"value": 45, "name": "BC4 R unorm"},
- {"value": 46, "name": "BC4 R snorm"},
- {"value": 47, "name": "BC5 RG unorm"},
- {"value": 48, "name": "BC5 RG snorm"},
- {"value": 49, "name": "BC6H RGB ufloat"},
- {"value": 50, "name": "BC6H RGB sfloat"},
- {"value": 51, "name": "BC7 RGBA unorm"},
- {"value": 52, "name": "BC7 RGBA unorm srgb"}
+ {"value": 40, "name": "BC1 RGBA unorm"},
+ {"value": 41, "name": "BC1 RGBA unorm srgb"},
+ {"value": 42, "name": "BC2 RGBA unorm"},
+ {"value": 43, "name": "BC2 RGBA unorm srgb"},
+ {"value": 44, "name": "BC3 RGBA unorm"},
+ {"value": 45, "name": "BC3 RGBA unorm srgb"},
+ {"value": 46, "name": "BC4 R unorm"},
+ {"value": 47, "name": "BC4 R snorm"},
+ {"value": 48, "name": "BC5 RG unorm"},
+ {"value": 49, "name": "BC5 RG snorm"},
+ {"value": 50, "name": "BC6H RGB ufloat"},
+ {"value": 51, "name": "BC6H RGB sfloat"},
+ {"value": 52, "name": "BC7 RGBA unorm"},
+ {"value": 53, "name": "BC7 RGBA unorm srgb"}
]
},
"texture usage": {
@@ -1690,6 +1776,12 @@
"void": {
"category": "native"
},
+ "void *": {
+ "category": "native"
+ },
+ "void const *": {
+ "category": "native"
+ },
"uint32_t": {
"category": "native"
},
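Taken together, the dawn.json hunks above retire Buffer::SetSubData and the split map-read/map-write entry points in favor of a unified "map async" plus "get (const) mapped range", add a "mapped at creation" flag to the buffer descriptor, and add query-set, timestamp, and Queue::WriteTexture entry points. A minimal sketch of the new mapping flow through the generated C++ API, assuming an already-initialized device; the include path and the device.Tick() polling loop are assumptions about the surrounding Dawn build rather than anything this diff shows:

    #include <cstring>

    #include "webgpu/webgpu_cpp.h"  // generated from dawn.json; path varies per build

    void WriteThenReadBack(const wgpu::Device& device) {
        // mappedAtCreation replaces the old CreateBufferMapped entry point:
        // the new buffer is immediately writable through GetMappedRange().
        wgpu::BufferDescriptor uploadDesc;
        uploadDesc.size = 4 * sizeof(float);
        uploadDesc.usage = wgpu::BufferUsage::CopySrc;
        uploadDesc.mappedAtCreation = true;
        wgpu::Buffer upload = device.CreateBuffer(&uploadDesc);

        const float data[4] = {0.f, 1.f, 2.f, 3.f};
        std::memcpy(upload.GetMappedRange(0, sizeof(data)), data, sizeof(data));
        upload.Unmap();

        // MapAsync(mode, offset, size, callback, userdata) replaces
        // MapReadAsync/MapWriteAsync; reads go through GetConstMappedRange().
        wgpu::BufferDescriptor readbackDesc;
        readbackDesc.size = sizeof(data);
        readbackDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
        wgpu::Buffer readback = device.CreateBuffer(&readbackDesc);
        // (a CopyBufferToBuffer submission from `upload` would go here)

        bool done = false;
        readback.MapAsync(
            wgpu::MapMode::Read, 0, sizeof(data),
            [](WGPUBufferMapAsyncStatus status, void* userdata) {
                // Real code should check `status` before touching the mapping.
                *static_cast<bool*>(userdata) = true;
            },
            &done);
        while (!done) {
            device.Tick();  // pump Dawn until the map callback fires
        }
        const float* mapped =
            static_cast<const float*>(readback.GetConstMappedRange(0, sizeof(data)));
        (void)mapped;  // ... read the four floats ...
        readback.Unmap();
    }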
diff --git a/chromium/third_party/dawn/dawn_wire.json b/chromium/third_party/dawn/dawn_wire.json
index 5291c3e228c..b2256a5a52a 100644
--- a/chromium/third_party/dawn/dawn_wire.json
+++ b/chromium/third_party/dawn/dawn_wire.json
@@ -14,26 +14,25 @@
"See the License for the specific language governing permissions and",
"limitations under the License."
],
+ "_todos": [
+ "Remove usage of size_t because it is not network transparent"
+ ],
"commands": {
"buffer map async": [
{ "name": "buffer id", "type": "ObjectId" },
{ "name": "request serial", "type": "uint32_t" },
- { "name": "is write", "type": "bool" },
+ { "name": "mode", "type": "map mode" },
+ { "name": "offset", "type": "size_t"},
+ { "name": "size", "type": "size_t"},
{ "name": "handle create info length", "type": "uint64_t" },
{ "name": "handle create info", "type": "uint8_t", "annotation": "const*", "length": "handle create info length", "skip_serialize": true}
],
- "buffer set sub data internal": [
- {"name": "buffer id", "type": "ObjectId" },
- {"name": "start", "type": "uint64_t"},
- {"name": "count", "type": "uint64_t"},
- {"name": "data", "type": "uint8_t", "annotation": "const*", "length": "count"}
- ],
"buffer update mapped data": [
{ "name": "buffer id", "type": "ObjectId" },
{ "name": "write flush info length", "type": "uint64_t" },
{ "name": "write flush info", "type": "uint8_t", "annotation": "const*", "length": "write flush info length", "skip_serialize": true}
],
- "device create buffer mapped": [
+ "device create buffer": [
{ "name": "device", "type": "device" },
{ "name": "descriptor", "type": "buffer descriptor", "annotation": "const*" },
{ "name": "result", "type": "ObjectHandle", "handle_type": "buffer" },
@@ -54,20 +53,23 @@
{"name": "buffer offset", "type": "uint64_t"},
{"name": "data", "type": "uint8_t", "annotation": "const*", "length": "size"},
{"name": "size", "type": "size_t"}
+ ],
+ "queue write texture internal": [
+ {"name": "queue id", "type": "ObjectId" },
+ {"name": "destination", "type": "texture copy view", "annotation": "const*"},
+ {"name": "data", "type": "uint8_t", "annotation": "const*", "length": "data size"},
+ {"name": "data size", "type": "size_t"},
+ {"name": "data layout", "type": "texture data layout", "annotation": "const*"},
+ {"name": "writeSize", "type": "extent 3D", "annotation": "const*"}
]
},
"return commands": {
- "buffer map read async callback": [
+ "buffer map async callback": [
{ "name": "buffer", "type": "ObjectHandle", "handle_type": "buffer" },
{ "name": "request serial", "type": "uint32_t" },
{ "name": "status", "type": "uint32_t" },
- { "name": "initial data info length", "type": "uint64_t" },
- { "name": "initial data info", "type": "uint8_t", "annotation": "const*", "length": "initial data info length", "skip_serialize": true }
- ],
- "buffer map write async callback": [
- { "name": "buffer", "type": "ObjectHandle", "handle_type": "buffer" },
- { "name": "request serial", "type": "uint32_t" },
- { "name": "status", "type": "uint32_t" }
+ { "name": "read initial data info length", "type": "uint64_t" },
+ { "name": "read initial data info", "type": "uint8_t", "annotation": "const*", "length": "read initial data info length", "skip_serialize": true }
],
"device uncaptured error callback": [
{ "name": "type", "type": "error type"},
@@ -94,22 +96,27 @@
"SurfaceDescriptorFromXlib"
],
"client_side_commands": [
+ "BufferMapAsync",
"BufferMapReadAsync",
"BufferMapWriteAsync",
- "BufferSetSubData",
+ "BufferGetConstMappedRange",
+ "BufferGetMappedRange",
+ "DeviceCreateBuffer",
+ "DeviceCreateBufferMapped",
"DevicePopErrorScope",
"DeviceSetDeviceLostCallback",
"DeviceSetUncapturedErrorCallback",
"FenceGetCompletedValue",
"FenceOnCompletion",
- "QueueWriteBuffer"
+ "QueueWriteBuffer",
+ "QueueWriteTexture"
],
"client_handwritten_commands": [
"BufferDestroy",
"BufferUnmap",
- "DeviceCreateBuffer",
- "DeviceCreateBufferMapped",
+ "DeviceCreateErrorBuffer",
"DeviceGetDefaultQueue",
+ "DeviceInjectError",
"DevicePushErrorScope",
"QueueCreateFence",
"QueueSignal"
@@ -117,7 +124,8 @@
"client_special_objects": [
"Buffer",
"Device",
- "Fence"
+ "Fence",
+ "Queue"
],
"server_custom_pre_handler_commands": [
"BufferDestroy",
diff --git a/chromium/third_party/dawn/examples/Animometer.cpp b/chromium/third_party/dawn/examples/Animometer.cpp
index dfd041cabe0..5c197c4cf55 100644
--- a/chromium/third_party/dawn/examples/Animometer.cpp
+++ b/chromium/third_party/dawn/examples/Animometer.cpp
@@ -18,8 +18,8 @@
#include "utils/SystemUtils.h"
#include "utils/WGPUHelpers.h"
-#include <cstdlib>
#include <cstdio>
+#include <cstdlib>
#include <vector>
wgpu::Device device;
@@ -138,8 +138,7 @@ void init() {
bufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform;
ubo = device.CreateBuffer(&bufferDesc);
- bindGroup =
- utils::MakeBindGroup(device, bgl, {{0, ubo, 0, sizeof(ShaderData)}});
+ bindGroup = utils::MakeBindGroup(device, bgl, {{0, ubo, 0, sizeof(ShaderData)}});
}
void frame() {
diff --git a/chromium/third_party/dawn/examples/BUILD.gn b/chromium/third_party/dawn/examples/BUILD.gn
index 39ef9c521f7..96ae72fbe9c 100644
--- a/chromium/third_party/dawn/examples/BUILD.gn
+++ b/chromium/third_party/dawn/examples/BUILD.gn
@@ -49,9 +49,7 @@ static_library("dawn_sample_utils") {
# Template for samples to avoid listing dawn_sample_utils as a dep every time
template("dawn_sample") {
executable(target_name) {
- deps = [
- ":dawn_sample_utils",
- ]
+ deps = [ ":dawn_sample_utils" ]
forward_variables_from(invoker, "*", [ "deps" ])
if (defined(invoker.deps)) {
@@ -61,43 +59,27 @@ template("dawn_sample") {
}
dawn_sample("CppHelloTriangle") {
- sources = [
- "CppHelloTriangle.cpp",
- ]
+ sources = [ "CppHelloTriangle.cpp" ]
}
dawn_sample("CHelloTriangle") {
- sources = [
- "CHelloTriangle.cpp",
- ]
+ sources = [ "CHelloTriangle.cpp" ]
}
dawn_sample("ComputeBoids") {
- sources = [
- "ComputeBoids.cpp",
- ]
- deps = [
- "${dawn_root}/third_party/gn/glm",
- ]
+ sources = [ "ComputeBoids.cpp" ]
+ deps = [ "${dawn_root}/third_party/gn/glm" ]
}
dawn_sample("Animometer") {
- sources = [
- "Animometer.cpp",
- ]
+ sources = [ "Animometer.cpp" ]
}
dawn_sample("CubeReflection") {
- sources = [
- "CubeReflection.cpp",
- ]
- deps = [
- "${dawn_root}/third_party/gn/glm",
- ]
+ sources = [ "CubeReflection.cpp" ]
+ deps = [ "${dawn_root}/third_party/gn/glm" ]
}
dawn_sample("ManualSwapChainTest") {
- sources = [
- "ManualSwapChainTest.cpp",
- ]
+ sources = [ "ManualSwapChainTest.cpp" ]
}
diff --git a/chromium/third_party/dawn/examples/CHelloTriangle.cpp b/chromium/third_party/dawn/examples/CHelloTriangle.cpp
index 048fba1987d..22d820295c4 100644
--- a/chromium/third_party/dawn/examples/CHelloTriangle.cpp
+++ b/chromium/third_party/dawn/examples/CHelloTriangle.cpp
@@ -120,7 +120,7 @@ void frame() {
{
colorAttachment.attachment = backbufferView;
colorAttachment.resolveTarget = nullptr;
- colorAttachment.clearColor = { 0.0f, 0.0f, 0.0f, 0.0f };
+ colorAttachment.clearColor = {0.0f, 0.0f, 0.0f, 0.0f};
colorAttachment.loadOp = WGPULoadOp_Clear;
colorAttachment.storeOp = WGPUStoreOp_Store;
renderpassInfo.colorAttachmentCount = 1;
diff --git a/chromium/third_party/dawn/examples/ComputeBoids.cpp b/chromium/third_party/dawn/examples/ComputeBoids.cpp
index 59538dfa651..f958ab4002d 100644
--- a/chromium/third_party/dawn/examples/ComputeBoids.cpp
+++ b/chromium/third_party/dawn/examples/ComputeBoids.cpp
@@ -67,7 +67,7 @@ void initBuffers() {
modelBuffer =
utils::CreateBufferFromData(device, model, sizeof(model), wgpu::BufferUsage::Vertex);
- SimParams params = { 0.04f, 0.1f, 0.025f, 0.025f, 0.02f, 0.05f, 0.005f, kNumParticles };
+ SimParams params = {0.04f, 0.1f, 0.025f, 0.025f, 0.02f, 0.05f, 0.005f, kNumParticles};
updateParams =
utils::CreateBufferFromData(device, &params, sizeof(params), wgpu::BufferUsage::Uniform);
@@ -75,8 +75,7 @@ void initBuffers() {
{
std::mt19937 generator;
std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
- for (auto& p : initialParticles)
- {
+ for (auto& p : initialParticles) {
p.pos = glm::vec2(dist(generator), dist(generator));
p.vel = glm::vec2(dist(generator), dist(generator)) * 0.1f;
}
@@ -253,11 +252,13 @@ void initSim() {
updatePipeline = device.CreateComputePipeline(&csDesc);
for (uint32_t i = 0; i < 2; ++i) {
- updateBGs[i] = utils::MakeBindGroup(device, bgl, {
- {0, updateParams, 0, sizeof(SimParams)},
- {1, particleBuffers[i], 0, kNumParticles * sizeof(Particle)},
- {2, particleBuffers[(i + 1) % 2], 0, kNumParticles * sizeof(Particle)},
- });
+ updateBGs[i] = utils::MakeBindGroup(
+ device, bgl,
+ {
+ {0, updateParams, 0, sizeof(SimParams)},
+ {1, particleBuffers[i], 0, kNumParticles * sizeof(Particle)},
+ {2, particleBuffers[(i + 1) % 2], 0, kNumParticles * sizeof(Particle)},
+ });
}
}
diff --git a/chromium/third_party/dawn/examples/CppHelloTriangle.cpp b/chromium/third_party/dawn/examples/CppHelloTriangle.cpp
index fbc598daa0e..378afa8db5e 100644
--- a/chromium/third_party/dawn/examples/CppHelloTriangle.cpp
+++ b/chromium/third_party/dawn/examples/CppHelloTriangle.cpp
@@ -36,15 +36,15 @@ wgpu::BindGroup bindGroup;
void initBuffers() {
static const uint32_t indexData[3] = {
- 0, 1, 2,
+ 0,
+ 1,
+ 2,
};
indexBuffer =
utils::CreateBufferFromData(device, indexData, sizeof(indexData), wgpu::BufferUsage::Index);
static const float vertexData[12] = {
- 0.0f, 0.5f, 0.0f, 1.0f,
- -0.5f, -0.5f, 0.0f, 1.0f,
- 0.5f, -0.5f, 0.0f, 1.0f,
+ 0.0f, 0.5f, 0.0f, 1.0f, -0.5f, -0.5f, 0.0f, 1.0f, 0.5f, -0.5f, 0.0f, 1.0f,
};
vertexBuffer = utils::CreateBufferFromData(device, vertexData, sizeof(vertexData),
wgpu::BufferUsage::Vertex);
@@ -141,17 +141,19 @@ void init() {
wgpu::TextureView view = texture.CreateView();
- bindGroup = utils::MakeBindGroup(device, bgl, {
- {0, sampler},
- {1, view}
- });
+ bindGroup = utils::MakeBindGroup(device, bgl, {{0, sampler}, {1, view}});
}
-struct {uint32_t a; float b;} s;
+struct {
+ uint32_t a;
+ float b;
+} s;
void frame() {
s.a = (s.a + 1) % 256;
s.b += 0.02f;
- if (s.b >= 1.0f) {s.b = 0.0f;}
+ if (s.b >= 1.0f) {
+ s.b = 0.0f;
+ }
wgpu::TextureView backbufferView = swapchain.GetCurrentTextureView();
utils::ComboRenderPassDescriptor renderPass({backbufferView}, depthStencilView);
diff --git a/chromium/third_party/dawn/examples/CubeReflection.cpp b/chromium/third_party/dawn/examples/CubeReflection.cpp
index becec8728b4..4ff18e003c3 100644
--- a/chromium/third_party/dawn/examples/CubeReflection.cpp
+++ b/chromium/third_party/dawn/examples/CubeReflection.cpp
@@ -18,10 +18,10 @@
#include "utils/SystemUtils.h"
#include "utils/WGPUHelpers.h"
-#include <vector>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
+#include <vector>
wgpu::Device device;
@@ -43,67 +43,44 @@ wgpu::RenderPipeline planePipeline;
wgpu::RenderPipeline reflectionPipeline;
void initBuffers() {
- static const uint32_t indexData[6*6] = {
- 0, 1, 2,
- 0, 2, 3,
+ static const uint32_t indexData[6 * 6] = {0, 1, 2, 0, 2, 3,
- 4, 5, 6,
- 4, 6, 7,
+ 4, 5, 6, 4, 6, 7,
- 8, 9, 10,
- 8, 10, 11,
+ 8, 9, 10, 8, 10, 11,
- 12, 13, 14,
- 12, 14, 15,
+ 12, 13, 14, 12, 14, 15,
- 16, 17, 18,
- 16, 18, 19,
+ 16, 17, 18, 16, 18, 19,
- 20, 21, 22,
- 20, 22, 23
- };
+ 20, 21, 22, 20, 22, 23};
indexBuffer =
utils::CreateBufferFromData(device, indexData, sizeof(indexData), wgpu::BufferUsage::Index);
static const float vertexData[6 * 4 * 6] = {
- -1.0, -1.0, 1.0, 1.0, 0.0, 0.0,
- 1.0, -1.0, 1.0, 1.0, 0.0, 0.0,
- 1.0, 1.0, 1.0, 1.0, 0.0, 0.0,
- -1.0, 1.0, 1.0, 1.0, 0.0, 0.0,
-
- -1.0, -1.0, -1.0, 1.0, 1.0, 0.0,
- -1.0, 1.0, -1.0, 1.0, 1.0, 0.0,
- 1.0, 1.0, -1.0, 1.0, 1.0, 0.0,
- 1.0, -1.0, -1.0, 1.0, 1.0, 0.0,
-
- -1.0, 1.0, -1.0, 1.0, 0.0, 1.0,
- -1.0, 1.0, 1.0, 1.0, 0.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 0.0, 1.0,
- 1.0, 1.0, -1.0, 1.0, 0.0, 1.0,
-
- -1.0, -1.0, -1.0, 0.0, 1.0, 0.0,
- 1.0, -1.0, -1.0, 0.0, 1.0, 0.0,
- 1.0, -1.0, 1.0, 0.0, 1.0, 0.0,
- -1.0, -1.0, 1.0, 0.0, 1.0, 0.0,
-
- 1.0, -1.0, -1.0, 0.0, 1.0, 1.0,
- 1.0, 1.0, -1.0, 0.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 0.0, 1.0, 1.0,
- 1.0, -1.0, 1.0, 0.0, 1.0, 1.0,
-
- -1.0, -1.0, -1.0, 1.0, 1.0, 1.0,
- -1.0, -1.0, 1.0, 1.0, 1.0, 1.0,
- -1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- -1.0, 1.0, -1.0, 1.0, 1.0, 1.0
- };
+ -1.0, -1.0, 1.0, 1.0, 0.0, 0.0, 1.0, -1.0, 1.0, 1.0, 0.0, 0.0,
+ 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, -1.0, 1.0, 1.0, 1.0, 0.0, 0.0,
+
+ -1.0, -1.0, -1.0, 1.0, 1.0, 0.0, -1.0, 1.0, -1.0, 1.0, 1.0, 0.0,
+ 1.0, 1.0, -1.0, 1.0, 1.0, 0.0, 1.0, -1.0, -1.0, 1.0, 1.0, 0.0,
+
+ -1.0, 1.0, -1.0, 1.0, 0.0, 1.0, -1.0, 1.0, 1.0, 1.0, 0.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, -1.0, 1.0, 0.0, 1.0,
+
+ -1.0, -1.0, -1.0, 0.0, 1.0, 0.0, 1.0, -1.0, -1.0, 0.0, 1.0, 0.0,
+ 1.0, -1.0, 1.0, 0.0, 1.0, 0.0, -1.0, -1.0, 1.0, 0.0, 1.0, 0.0,
+
+ 1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 1.0, 1.0, -1.0, 0.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, -1.0, 1.0, 0.0, 1.0, 1.0,
+
+ -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0,
+ -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 1.0, 1.0};
vertexBuffer = utils::CreateBufferFromData(device, vertexData, sizeof(vertexData),
wgpu::BufferUsage::Vertex);
static const float planeData[6 * 4] = {
- -2.0, -1.0, -2.0, 0.5, 0.5, 0.5,
- 2.0, -1.0, -2.0, 0.5, 0.5, 0.5,
- 2.0, -1.0, 2.0, 0.5, 0.5, 0.5,
- -2.0, -1.0, 2.0, 0.5, 0.5, 0.5,
+ -2.0, -1.0, -2.0, 0.5, 0.5, 0.5, 2.0, -1.0, -2.0, 0.5, 0.5, 0.5,
+ 2.0, -1.0, 2.0, 0.5, 0.5, 0.5, -2.0, -1.0, 2.0, 0.5, 0.5, 0.5,
};
planeBuffer = utils::CreateBufferFromData(device, planeData, sizeof(planeData),
wgpu::BufferUsage::Vertex);
@@ -191,15 +168,13 @@ void init() {
transformBuffer[1] = utils::CreateBufferFromData(device, &transform, sizeof(glm::mat4),
wgpu::BufferUsage::Uniform);
- bindGroup[0] = utils::MakeBindGroup(device, bgl, {
- {0, cameraBuffer, 0, sizeof(CameraData)},
- {1, transformBuffer[0], 0, sizeof(glm::mat4)}
- });
+ bindGroup[0] = utils::MakeBindGroup(
+ device, bgl,
+ {{0, cameraBuffer, 0, sizeof(CameraData)}, {1, transformBuffer[0], 0, sizeof(glm::mat4)}});
- bindGroup[1] = utils::MakeBindGroup(device, bgl, {
- {0, cameraBuffer, 0, sizeof(CameraData)},
- {1, transformBuffer[1], 0, sizeof(glm::mat4)}
- });
+ bindGroup[1] = utils::MakeBindGroup(
+ device, bgl,
+ {{0, cameraBuffer, 0, sizeof(CameraData)}, {1, transformBuffer[1], 0, sizeof(glm::mat4)}});
depthStencilView = CreateDefaultDepthStencilView(device);
@@ -250,17 +225,20 @@ void init() {
cameraData.proj = glm::perspective(glm::radians(45.0f), 1.f, 1.0f, 100.0f);
}
-struct {uint32_t a; float b;} s;
+struct {
+ uint32_t a;
+ float b;
+} s;
void frame() {
s.a = (s.a + 1) % 256;
s.b += 0.01f;
- if (s.b >= 1.0f) {s.b = 0.0f;}
+ if (s.b >= 1.0f) {
+ s.b = 0.0f;
+ }
- cameraData.view = glm::lookAt(
- glm::vec3(8.f * std::sin(glm::radians(s.b * 360.f)), 2.f, 8.f * std::cos(glm::radians(s.b * 360.f))),
- glm::vec3(0.0f, 0.0f, 0.0f),
- glm::vec3(0.0f, 1.0f, 0.0f)
- );
+ cameraData.view = glm::lookAt(glm::vec3(8.f * std::sin(glm::radians(s.b * 360.f)), 2.f,
+ 8.f * std::cos(glm::radians(s.b * 360.f))),
+ glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f));
queue.WriteBuffer(cameraBuffer, 0, &cameraData, sizeof(CameraData));
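The reflowed glm::lookAt call above keeps the camera orbiting the origin: the phase s.b in [0, 1) maps to an angle, and the eye sits at (8·sin θ, 2, 8·cos θ). A quick numeric check of one frame, as a minimal Python sketch (values illustrative):

    import math

    b = 0.25  # phase a quarter of the way through the cycle
    theta = math.radians(b * 360.0)
    eye = (8.0 * math.sin(theta), 2.0, 8.0 * math.cos(theta))
    print(eye)  # (8.0, 2.0, ~0.0): the camera is a quarter turn around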
diff --git a/chromium/third_party/dawn/examples/ManualSwapChainTest.cpp b/chromium/third_party/dawn/examples/ManualSwapChainTest.cpp
index ee924d3dba8..9e3990d820b 100644
--- a/chromium/third_party/dawn/examples/ManualSwapChainTest.cpp
+++ b/chromium/third_party/dawn/examples/ManualSwapChainTest.cpp
@@ -150,7 +150,8 @@ void DoRender(WindowData* data) {
utils::ComboRenderPassDescriptor desc({view});
desc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
- desc.cColorAttachments[0].clearColor = {data->clearCycle, 1.0f - data->clearCycle, 0.0f, 1.0f};
+ desc.cColorAttachments[0].clearColor = {data->clearCycle, 1.0f - data->clearCycle, 0.0f,
+ 1.0f};
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&desc);
pass.EndPass();
diff --git a/chromium/third_party/dawn/examples/SampleUtils.cpp b/chromium/third_party/dawn/examples/SampleUtils.cpp
index 17b7c9e7c57..ee7e0061563 100644
--- a/chromium/third_party/dawn/examples/SampleUtils.cpp
+++ b/chromium/third_party/dawn/examples/SampleUtils.cpp
@@ -60,7 +60,7 @@ void PrintGLFWError(int code, const char* message) {
enum class CmdBufType {
None,
Terrible,
- //TODO(cwallez@chromium.org) double terrible cmdbuf
+ // TODO(cwallez@chromium.org): double terrible cmdbuf
};
// Default to D3D12, Metal, Vulkan, OpenGL in that order as D3D12 and Metal are the preferred on
@@ -74,7 +74,7 @@ static wgpu::BackendType backendType = wgpu::BackendType::Vulkan;
#elif defined(DAWN_ENABLE_BACKEND_OPENGL)
static wgpu::BackendType backendType = wgpu::BackendType::OpenGL;
#else
- #error
+# error
#endif
static CmdBufType cmdBufType = CmdBufType::Terrible;
@@ -136,31 +136,29 @@ wgpu::Device CreateCppDawnDevice() {
cDevice = backendDevice;
break;
- case CmdBufType::Terrible:
- {
- c2sBuf = new utils::TerribleCommandBuffer();
- s2cBuf = new utils::TerribleCommandBuffer();
+ case CmdBufType::Terrible: {
+ c2sBuf = new utils::TerribleCommandBuffer();
+ s2cBuf = new utils::TerribleCommandBuffer();
- dawn_wire::WireServerDescriptor serverDesc = {};
- serverDesc.device = backendDevice;
- serverDesc.procs = &backendProcs;
- serverDesc.serializer = s2cBuf;
+ dawn_wire::WireServerDescriptor serverDesc = {};
+ serverDesc.device = backendDevice;
+ serverDesc.procs = &backendProcs;
+ serverDesc.serializer = s2cBuf;
- wireServer = new dawn_wire::WireServer(serverDesc);
- c2sBuf->SetHandler(wireServer);
+ wireServer = new dawn_wire::WireServer(serverDesc);
+ c2sBuf->SetHandler(wireServer);
- dawn_wire::WireClientDescriptor clientDesc = {};
- clientDesc.serializer = c2sBuf;
+ dawn_wire::WireClientDescriptor clientDesc = {};
+ clientDesc.serializer = c2sBuf;
- wireClient = new dawn_wire::WireClient(clientDesc);
- WGPUDevice clientDevice = wireClient->GetDevice();
- DawnProcTable clientProcs = dawn_wire::WireClient::GetProcs();
- s2cBuf->SetHandler(wireClient);
+ wireClient = new dawn_wire::WireClient(clientDesc);
+ WGPUDevice clientDevice = wireClient->GetDevice();
+ DawnProcTable clientProcs = dawn_wire::WireClient::GetProcs();
+ s2cBuf->SetHandler(wireClient);
- procs = clientProcs;
- cDevice = clientDevice;
- }
- break;
+ procs = clientProcs;
+ cDevice = clientDevice;
+ } break;
}
dawnProcSetProcs(&procs);
@@ -221,7 +219,8 @@ bool InitSample(int argc, const char** argv) {
backendType = wgpu::BackendType::Vulkan;
continue;
}
- fprintf(stderr, "--backend expects a backend name (opengl, metal, d3d12, null, vulkan)\n");
+ fprintf(stderr,
+ "--backend expects a backend name (opengl, metal, d3d12, null, vulkan)\n");
return false;
}
if (std::string("-c") == argv[i] || std::string("--command-buffer") == argv[i]) {
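The Terrible path restructured above drives the whole wire protocol inside one process: commands are serialized into one buffer (client to server), handled, and replies flow back through a second buffer. A minimal sketch of that loopback shape, in Python and with invented names (not Dawn's API):

    class LoopbackBuffer:
        # Stands in for utils::TerribleCommandBuffer: collects serialized
        # commands and hands them to the peer's handler on flush.
        def __init__(self):
            self.handler = None
            self.pending = bytearray()

        def serialize(self, data):
            self.pending += data

        def flush(self):
            self.handler.handle_commands(bytes(self.pending))
            self.pending.clear()

    class EchoServer:
        def handle_commands(self, data):
            print('server received', len(data), 'bytes')

    c2s = LoopbackBuffer()
    c2s.handler = EchoServer()
    c2s.serialize(b'\x01\x02\x03')
    c2s.flush()  # prints: server received 3 bytes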
diff --git a/chromium/third_party/dawn/generator/BUILD.gn b/chromium/third_party/dawn/generator/BUILD.gn
index 2970e2277c7..acf48736b3e 100644
--- a/chromium/third_party/dawn/generator/BUILD.gn
+++ b/chromium/third_party/dawn/generator/BUILD.gn
@@ -20,14 +20,15 @@ import("dawn_generator.gni")
# files but we can't just put dawn_gen_root because there are more than
# autogenerated sources there.
_stale_dirs = [
- "dawn",
- "dawn_native",
- "dawn_wire",
- "mock",
- "src"
+ "dawn",
+ "dawn_native",
+ "dawn_wire",
+ "mock",
+ "src",
]
-_allowed_output_dirs_file = "${dawn_gen_root}/removed_stale_autogen_files.allowed_output_dirs"
+_allowed_output_dirs_file =
+ "${dawn_gen_root}/removed_stale_autogen_files.allowed_output_dirs"
write_file(_allowed_output_dirs_file, dawn_allowed_gen_output_dirs)
_stale_dirs_file = "${dawn_gen_root}/removed_stale_autogen_files.stale_dirs"
@@ -52,8 +53,11 @@ action("remove_stale_autogen_files") {
# Have the "list of file" inputs as a dependency so that the action reruns
# as soon as they change.
- inputs = [_allowed_output_dirs_file, _stale_dirs_file]
+ inputs = [
+ _allowed_output_dirs_file,
+ _stale_dirs_file,
+ ]
# Output a stamp file so we don't re-run this action on every build.
- outputs = [_stamp_file]
+ outputs = [ _stamp_file ]
}
diff --git a/chromium/third_party/dawn/generator/dawn_json_generator.py b/chromium/third_party/dawn/generator/dawn_json_generator.py
index 6037aa8fab5..b7d9003e9d2 100644
--- a/chromium/third_party/dawn/generator/dawn_json_generator.py
+++ b/chromium/third_party/dawn/generator/dawn_json_generator.py
@@ -22,6 +22,7 @@ from generator_lib import Generator, run_generator, FileRender
# OBJECT MODEL
############################################################
+
class Name:
def __init__(self, name, native=False):
self.native = native
@@ -44,7 +45,8 @@ class Name:
return ''.join(self.chunks)
def camelCase(self):
- return self.chunks[0] + ''.join([self.CamelChunk(chunk) for chunk in self.chunks[1:]])
+ return self.chunks[0] + ''.join(
+ [self.CamelChunk(chunk) for chunk in self.chunks[1:]])
def CamelCase(self):
return ''.join([self.CamelChunk(chunk) for chunk in self.chunks])
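All the casings in Name derive from the same lowercase, space-separated chunks, which is what keeps camelCase, CamelCase, and SNAKE_CASE consistent with each other. A standalone sketch of the idea (not the generator's exact class):

    chunks = 'buffer descriptor'.split(' ')

    def CamelChunk(chunk):
        return chunk[0].upper() + chunk[1:]

    camelCase = chunks[0] + ''.join(CamelChunk(c) for c in chunks[1:])
    CamelCase = ''.join(CamelChunk(c) for c in chunks)
    SNAKE_CASE = '_'.join(c.upper() for c in chunks)

    print(camelCase, CamelCase, SNAKE_CASE)
    # bufferDescriptor BufferDescriptor BUFFER_DESCRIPTOR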
@@ -63,9 +65,11 @@ class Name:
result += chunk.lower()
return result
+
def concat_names(*names):
return ' '.join([name.canonical_case() for name in names])
+
class Type:
def __init__(self, name, json_data, native=False):
self.json_data = json_data
@@ -74,7 +78,10 @@ class Type:
self.category = json_data['category']
self.javascript = self.json_data.get('javascript', True)
+
EnumValue = namedtuple('EnumValue', ['name', 'value', 'valid', 'jsrepr'])
+
+
class EnumType(Type):
def __init__(self, name, json_data):
Type.__init__(self, name, json_data)
@@ -87,42 +94,55 @@ class EnumType(Type):
if value != lastValue + 1:
self.contiguousFromZero = False
lastValue = value
- self.values.append(EnumValue(
- Name(m['name']),
- value,
- m.get('valid', True),
- m.get('jsrepr', None)))
+ self.values.append(
+ EnumValue(Name(m['name']), value, m.get('valid', True),
+ m.get('jsrepr', None)))
# Assert that all values are unique in enums
all_values = set()
for value in self.values:
if value.value in all_values:
- raise Exception("Duplicate value {} in enum {}".format(value.value, name))
+ raise Exception("Duplicate value {} in enum {}".format(
+ value.value, name))
all_values.add(value.value)
+
BitmaskValue = namedtuple('BitmaskValue', ['name', 'value'])
+
+
class BitmaskType(Type):
def __init__(self, name, json_data):
Type.__init__(self, name, json_data)
- self.values = [BitmaskValue(Name(m['name']), m['value']) for m in self.json_data['values']]
+ self.values = [
+ BitmaskValue(Name(m['name']), m['value'])
+ for m in self.json_data['values']
+ ]
self.full_mask = 0
for value in self.values:
self.full_mask = self.full_mask | value.value
+
class CallbackType(Type):
def __init__(self, name, json_data):
Type.__init__(self, name, json_data)
self.arguments = []
+
class NativeType(Type):
def __init__(self, name, json_data):
Type.__init__(self, name, json_data, native=True)
+
# Methods and structures are both "records", so record members correspond to
# method arguments or structure members.
class RecordMember:
- def __init__(self, name, typ, annotation, optional=False,
- is_return_value=False, default_value=None,
+ def __init__(self,
+ name,
+ typ,
+ annotation,
+ optional=False,
+ is_return_value=False,
+ default_value=None,
skip_serialize=False):
self.name = name
self.type = typ
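The hunk above tightens two invariants: enum values must be unique, and a bitmask's full_mask is the OR-fold of its values. A small sketch of both checks, with made-up values:

    values = {'none': 0, 'map read': 1, 'map write': 2, 'copy src': 4}

    # Uniqueness check, as in EnumType.__init__
    seen = set()
    for name, value in values.items():
        assert value not in seen, 'Duplicate value {}'.format(value)
        seen.add(value)

    # OR-fold, as in BitmaskType.__init__
    full_mask = 0
    for value in values.values():
        full_mask |= value
    assert full_mask == 7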
@@ -138,13 +158,17 @@ class RecordMember:
assert self.type.dict_name == "ObjectHandle"
self.handle_type = handle_type
+
Method = namedtuple('Method', ['name', 'return_type', 'arguments'])
+
+
class ObjectType(Type):
def __init__(self, name, json_data):
Type.__init__(self, name, json_data)
self.methods = []
self.built_type = None
+
class Record:
def __init__(self, name):
self.name = Name(name)
@@ -160,12 +184,15 @@ class Record:
else:
return False
- self.may_have_dawn_object = any(may_have_dawn_object(member) for member in self.members)
+ self.may_have_dawn_object = any(
+ may_have_dawn_object(member) for member in self.members)
- # set may_have_dawn_object to true if the type is chained or extensible. Chained structs
- # may contain a Dawn object.
+ # Set may_have_dawn_object to true if the type is chained or
+ # extensible. Chained structs may contain a Dawn object.
if isinstance(self, StructureType):
- self.may_have_dawn_object = self.may_have_dawn_object or self.chained or self.extensible
+ self.may_have_dawn_object = (self.may_have_dawn_object
+ or self.chained or self.extensible)
+
class StructureType(Record, Type):
def __init__(self, name, json_data):
@@ -173,9 +200,11 @@ class StructureType(Record, Type):
Type.__init__(self, name, json_data)
self.chained = json_data.get("chained", False)
self.extensible = json_data.get("extensible", False)
- # Chained structs inherit from wgpu::ChainedStruct which has nextInChain so setting
- # both extensible and chained would result in two nextInChain members.
- assert(not (self.extensible and self.chained))
+ # Chained structs inherit from wgpu::ChainedStruct, which has
+ # nextInChain, so setting both extensible and chained would result in
+ # two nextInChain members.
+ assert not (self.extensible and self.chained)
+
class Command(Record):
def __init__(self, name, members=None):
@@ -184,11 +213,13 @@ class Command(Record):
self.derived_object = None
self.derived_method = None
+
def linked_record_members(json_data, types):
members = []
members_by_name = {}
for m in json_data:
- member = RecordMember(Name(m['name']), types[m['type']],
+ member = RecordMember(Name(m['name']),
+ types[m['type']],
m.get('annotation', 'value'),
optional=m.get('optional', False),
is_return_value=m.get('is_return_value', False),
@@ -207,7 +238,7 @@ def linked_record_members(json_data, types):
member.length = "constant"
member.constant_length = 1
else:
- assert(False)
+ assert False
elif m['length'] == 'strlen':
member.length = 'strlen'
else:
@@ -215,31 +246,42 @@ def linked_record_members(json_data, types):
return members
+
############################################################
# PARSE
############################################################
+
def link_object(obj, types):
def make_method(json_data):
arguments = linked_record_members(json_data.get('args', []), types)
- return Method(Name(json_data['name']), types[json_data.get('returns', 'void')], arguments)
+ return Method(Name(json_data['name']),
+ types[json_data.get('returns', 'void')], arguments)
obj.methods = [make_method(m) for m in obj.json_data.get('methods', [])]
obj.methods.sort(key=lambda method: method.name.canonical_case())
+
def link_structure(struct, types):
struct.members = linked_record_members(struct.json_data['members'], types)
+
def link_callback(callback, types):
- callback.arguments = linked_record_members(callback.json_data['args'], types)
-
-# Sort structures so that if struct A has struct B as a member, then B is listed before A
-# This is a form of topological sort where we try to keep the order reasonably similar to the
-# original order (though th sort isn't technically stable).
-# It works by computing for each struct type what is the depth of its DAG of dependents, then
-# resorting based on that depth using Python's stable sort. This makes a toposort because if
-# A depends on B then its depth will be bigger than B's. It is also nice because all nodes
-# with the same depth are kept in the input order.
+ callback.arguments = linked_record_members(callback.json_data['args'],
+ types)
+
+
+# Sort structures so that if struct A has struct B as a member, then B is
+# listed before A.
+#
+# This is a form of topological sort where we try to keep the order reasonably
+# similar to the original order (though the sort isn't technically stable).
+#
+# It works by computing for each struct type what is the depth of its DAG of
+# dependents, then resorting based on that depth using Python's stable sort.
+# This makes a toposort because if A depends on B then its depth will be bigger
+# than B's. It is also nice because all nodes with the same depth are kept in
+# the input order.
def topo_sort_structure(structs):
for struct in structs:
struct.visited = False
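The rewritten comment block spells out the algorithm: each struct's depth is one more than the deepest struct-typed member it contains, and a stable sort by that depth yields a topological order that preserves input order among peers. A compact sketch under the same no-cycles assumption (attribute names illustrative):

    def topo_sort(structs):
        depth = {}

        def compute_depth(s):
            # Assumes the dependency graph is a DAG, as the generator does.
            if s.name not in depth:
                depth[s.name] = max(
                    (compute_depth(m) + 1 for m in s.struct_members),
                    default=0)
            return depth[s.name]

        for s in structs:
            compute_depth(s)
        # Python's sort is stable, so equal depths keep their input order.
        return sorted(structs, key=lambda s: depth[s.name])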
@@ -252,7 +294,8 @@ def topo_sort_structure(structs):
max_dependent_depth = 0
for member in struct.members:
if member.type.category == 'structure':
- max_dependent_depth = max(max_dependent_depth, compute_depth(member.type) + 1)
+ max_dependent_depth = max(max_dependent_depth,
+ compute_depth(member.type) + 1)
struct.subdag_depth = max_dependent_depth
struct.visited = True
@@ -269,6 +312,7 @@ def topo_sort_structure(structs):
return result
+
def parse_json(json):
category_to_parser = {
'bitmask': BitmaskType,
@@ -303,22 +347,22 @@ def parse_json(json):
link_callback(callback, types)
for category in by_category.keys():
- by_category[category] = sorted(by_category[category], key=lambda typ: typ.name.canonical_case())
+ by_category[category] = sorted(
+ by_category[category], key=lambda typ: typ.name.canonical_case())
by_category['structure'] = topo_sort_structure(by_category['structure'])
for struct in by_category['structure']:
struct.update_metadata()
- return {
- 'types': types,
- 'by_category': by_category
- }
+ return {'types': types, 'by_category': by_category}
+
############################################################
# WIRE STUFF
############################################################
+
# Create wire commands from api methods
def compute_wire_params(api_params, wire_json):
wire_params = api_params.copy()
@@ -327,7 +371,8 @@ def compute_wire_params(api_params, wire_json):
commands = []
return_commands = []
- wire_json['special items']['client_handwritten_commands'] += wire_json['special items']['client_side_commands']
+ wire_json['special items']['client_handwritten_commands'] += wire_json[
+ 'special items']['client_side_commands']
# Generate commands from object methods
for api_object in wire_params['by_category']['object']:
@@ -335,21 +380,33 @@ def compute_wire_params(api_params, wire_json):
command_name = concat_names(api_object.name, method.name)
command_suffix = Name(command_name).CamelCase()
- # Only object return values or void are supported. Other methods must be handwritten.
- if method.return_type.category != 'object' and method.return_type.name.canonical_case() != 'void':
- assert(command_suffix in wire_json['special items']['client_handwritten_commands'])
+ # Only object return values or void are supported.
+ # Other methods must be handwritten.
+ is_object = method.return_type.category == 'object'
+ is_void = method.return_type.name.canonical_case() == 'void'
+ if not (is_object or is_void):
+ assert command_suffix in (
+ wire_json['special items']['client_handwritten_commands'])
continue
- if command_suffix in wire_json['special items']['client_side_commands']:
+ if command_suffix in (
+ wire_json['special items']['client_side_commands']):
continue
# Create object method commands by prepending "self"
- members = [RecordMember(Name('self'), types[api_object.dict_name], 'value')]
+ members = [
+ RecordMember(Name('self'), types[api_object.dict_name],
+ 'value')
+ ]
members += method.arguments
- # Client->Server commands that return an object return the result object handle
+ # Client->Server commands that return an object return the
+ # result object handle
if method.return_type.category == 'object':
- result = RecordMember(Name('result'), types['ObjectHandle'], 'value', is_return_value=True)
+ result = RecordMember(Name('result'),
+ types['ObjectHandle'],
+ 'value',
+ is_return_value=True)
result.set_handle_type(method.return_type)
members.append(result)
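The loop above turns each API method into a wire command record: the receiver is prepended as 'self', and object-returning methods get an extra 'result' ObjectHandle that travels back to the client. The same shape as a runnable sketch (field names illustrative):

    from collections import namedtuple

    Arg = namedtuple('Arg', ['name', 'type'])

    def make_command_members(method_args, returns_object):
        # Every client->server command carries the receiver object first.
        members = [Arg('self', 'ObjectId')] + list(method_args)
        if returns_object:
            # The reply is just a handle; the client allocates the object.
            members.append(Arg('result', 'ObjectHandle'))
        return members

    print(make_command_members([Arg('descriptor', 'BufferDescriptor')], True))
    # [Arg(name='self', ...), Arg(name='descriptor', ...), Arg(name='result', ...)]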
@@ -362,7 +419,8 @@ def compute_wire_params(api_params, wire_json):
commands.append(Command(name, linked_record_members(json_data, types)))
for (name, json_data) in wire_json['return commands'].items():
- return_commands.append(Command(name, linked_record_members(json_data, types)))
+ return_commands.append(
+ Command(name, linked_record_members(json_data, types)))
wire_params['cmd_records'] = {
'command': commands,
@@ -378,12 +436,16 @@ def compute_wire_params(api_params, wire_json):
return wire_params
+
#############################################################
# Generator
#############################################################
+
def as_varName(*names):
- return names[0].camelCase() + ''.join([name.CamelCase() for name in names[1:]])
+ return names[0].camelCase() + ''.join(
+ [name.CamelCase() for name in names[1:]])
+
def as_cType(name):
if name.native:
@@ -391,27 +453,32 @@ def as_cType(name):
else:
return 'WGPU' + name.CamelCase()
+
def as_cTypeDawn(name):
if name.native:
return name.concatcase()
else:
return 'Dawn' + name.CamelCase()
+
def as_cTypeEnumSpecialCase(typ):
if typ.category == 'bitmask':
return as_cType(typ.name) + 'Flags'
return as_cType(typ.name)
+
def as_cppType(name):
if name.native:
return name.concatcase()
else:
return name.CamelCase()
+
def as_jsEnumValue(value):
if value.jsrepr: return value.jsrepr
return "'" + value.name.js_enum_case() + "'"
+
def convert_cType_to_cppType(typ, annotation, arg, indent=0):
if typ.category == 'native':
return arg
@@ -422,18 +489,20 @@ def convert_cType_to_cppType(typ, annotation, arg, indent=0):
converted_members = [
convert_cType_to_cppType(
member.type, member.annotation,
- '{}.{}'.format(arg, as_varName(member.name)),
- indent + 1)
- for member in typ.members]
+ '{}.{}'.format(arg, as_varName(member.name)), indent + 1)
+ for member in typ.members
+ ]
- converted_members = [(' ' * 4) + m for m in converted_members ]
+ converted_members = [(' ' * 4) + m for m in converted_members]
converted_members = ',\n'.join(converted_members)
return as_cppType(typ.name) + ' {\n' + converted_members + '\n}'
else:
return 'static_cast<{}>({})'.format(as_cppType(typ.name), arg)
else:
- return 'reinterpret_cast<{} {}>({})'.format(as_cppType(typ.name), annotation, arg)
+ return 'reinterpret_cast<{} {}>({})'.format(as_cppType(typ.name),
+ annotation, arg)
+
def decorate(name, typ, arg):
if arg.annotation == 'value':
@@ -445,46 +514,57 @@ def decorate(name, typ, arg):
elif arg.annotation == 'const*const*':
return 'const ' + typ + '* const * ' + name
else:
- assert(False)
+ assert False
+
def annotated(typ, arg):
name = as_varName(arg.name)
return decorate(name, typ, arg)
+
def as_cEnum(type_name, value_name):
- assert(not type_name.native and not value_name.native)
+ assert not type_name.native and not value_name.native
return 'WGPU' + type_name.CamelCase() + '_' + value_name.CamelCase()
+
def as_cEnumDawn(type_name, value_name):
- assert(not type_name.native and not value_name.native)
- return 'DAWN' + '_' + type_name.SNAKE_CASE() + '_' + value_name.SNAKE_CASE()
+ assert not type_name.native and not value_name.native
+ return ('DAWN' + '_' + type_name.SNAKE_CASE() + '_' +
+ value_name.SNAKE_CASE())
+
def as_cppEnum(value_name):
- assert(not value_name.native)
+ assert not value_name.native
if value_name.concatcase()[0].isdigit():
return "e" + value_name.CamelCase()
return value_name.CamelCase()
+
def as_cMethod(type_name, method_name):
- assert(not type_name.native and not method_name.native)
+ assert not type_name.native and not method_name.native
return 'wgpu' + type_name.CamelCase() + method_name.CamelCase()
+
def as_cMethodDawn(type_name, method_name):
- assert(not type_name.native and not method_name.native)
+ assert not type_name.native and not method_name.native
return 'dawn' + type_name.CamelCase() + method_name.CamelCase()
+
def as_MethodSuffix(type_name, method_name):
- assert(not type_name.native and not method_name.native)
+ assert not type_name.native and not method_name.native
return type_name.CamelCase() + method_name.CamelCase()
+
def as_cProc(type_name, method_name):
- assert(not type_name.native and not method_name.native)
+ assert not type_name.native and not method_name.native
return 'WGPU' + 'Proc' + type_name.CamelCase() + method_name.CamelCase()
+
def as_cProcDawn(type_name, method_name):
- assert(not type_name.native and not method_name.native)
+ assert not type_name.native and not method_name.native
return 'Dawn' + 'Proc' + type_name.CamelCase() + method_name.CamelCase()
+
def as_frontendType(typ):
if typ.category == 'object':
return typ.name.CamelCase() + 'Base*'
@@ -495,6 +575,7 @@ def as_frontendType(typ):
else:
return as_cType(typ.name)
+
def as_wireType(typ):
if typ.category == 'object':
return typ.name.CamelCase() + '*'
@@ -503,31 +584,50 @@ def as_wireType(typ):
else:
return as_cppType(typ.name)
+
def c_methods(types, typ):
return typ.methods + [
Method(Name('reference'), types['void'], []),
Method(Name('release'), types['void'], []),
]
+
def get_c_methods_sorted_by_name(api_params):
unsorted = [(as_MethodSuffix(typ.name, method.name), typ, method) \
for typ in api_params['by_category']['object'] \
for method in c_methods(api_params['types'], typ) ]
return [(typ, method) for (_, typ, method) in sorted(unsorted)]
+
def has_callback_arguments(method):
return any(arg.type.category == 'callback' for arg in method.arguments)
+
class MultiGeneratorFromDawnJSON(Generator):
def get_description(self):
return 'Generates code for various targets from Dawn.json.'
def add_commandline_arguments(self, parser):
- allowed_targets = ['dawn_headers', 'dawncpp_headers', 'dawncpp', 'dawn_proc', 'mock_webgpu', 'dawn_wire', "dawn_native_utils"]
-
- parser.add_argument('--dawn-json', required=True, type=str, help ='The DAWN JSON definition to use.')
- parser.add_argument('--wire-json', default=None, type=str, help='The DAWN WIRE JSON definition to use.')
- parser.add_argument('--targets', required=True, type=str, help='Comma-separated subset of targets to output. Available targets: ' + ', '.join(allowed_targets))
+ allowed_targets = [
+ 'dawn_headers', 'dawncpp_headers', 'dawncpp', 'dawn_proc',
+ 'mock_webgpu', 'dawn_wire', "dawn_native_utils"
+ ]
+
+ parser.add_argument('--dawn-json',
+ required=True,
+ type=str,
+ help='The DAWN JSON definition to use.')
+ parser.add_argument('--wire-json',
+ default=None,
+ type=str,
+ help='The DAWN WIRE JSON definition to use.')
+ parser.add_argument(
+ '--targets',
+ required=True,
+ type=str,
+ help=
+ 'Comma-separated subset of targets to output. Available targets: '
+ + ', '.join(allowed_targets))
def get_file_renders(self, args):
with open(args.dawn_json) as f:
@@ -543,9 +643,10 @@ class MultiGeneratorFromDawnJSON(Generator):
base_params = {
'Name': lambda name: Name(name),
-
- 'as_annotated_cType': lambda arg: annotated(as_cTypeEnumSpecialCase(arg.type), arg),
- 'as_annotated_cppType': lambda arg: annotated(as_cppType(arg.type.name), arg),
+ 'as_annotated_cType': \
+ lambda arg: annotated(as_cTypeEnumSpecialCase(arg.type), arg),
+ 'as_annotated_cppType': \
+ lambda arg: annotated(as_cppType(arg.type.name), arg),
'as_cEnum': as_cEnum,
'as_cEnumDawn': as_cEnumDawn,
'as_cppEnum': as_cppEnum,
@@ -562,79 +663,145 @@ class MultiGeneratorFromDawnJSON(Generator):
'as_varName': as_varName,
'decorate': decorate,
'c_methods': lambda typ: c_methods(api_params['types'], typ),
- 'c_methods_sorted_by_name': get_c_methods_sorted_by_name(api_params),
+ 'c_methods_sorted_by_name': \
+ get_c_methods_sorted_by_name(api_params),
}
renders = []
if 'dawn_headers' in targets:
- renders.append(FileRender('webgpu.h', 'src/include/dawn/webgpu.h', [base_params, api_params]))
- renders.append(FileRender('dawn_proc_table.h', 'src/include/dawn/dawn_proc_table.h', [base_params, api_params]))
+ renders.append(
+ FileRender('webgpu.h', 'src/include/dawn/webgpu.h',
+ [base_params, api_params]))
+ renders.append(
+ FileRender('dawn_proc_table.h',
+ 'src/include/dawn/dawn_proc_table.h',
+ [base_params, api_params]))
if 'dawncpp_headers' in targets:
- renders.append(FileRender('webgpu_cpp.h', 'src/include/dawn/webgpu_cpp.h', [base_params, api_params]))
+ renders.append(
+ FileRender('webgpu_cpp.h', 'src/include/dawn/webgpu_cpp.h',
+ [base_params, api_params]))
if 'dawn_proc' in targets:
- renders.append(FileRender('dawn_proc.c', 'src/dawn/dawn_proc.c', [base_params, api_params]))
+ renders.append(
+ FileRender('dawn_proc.c', 'src/dawn/dawn_proc.c',
+ [base_params, api_params]))
if 'dawncpp' in targets:
- renders.append(FileRender('webgpu_cpp.cpp', 'src/dawn/webgpu_cpp.cpp', [base_params, api_params]))
+ renders.append(
+ FileRender('webgpu_cpp.cpp', 'src/dawn/webgpu_cpp.cpp',
+ [base_params, api_params]))
if 'emscripten_bits' in targets:
- renders.append(FileRender('webgpu_struct_info.json', 'src/dawn/webgpu_struct_info.json', [base_params, api_params]))
- renders.append(FileRender('library_webgpu_enum_tables.js', 'src/dawn/library_webgpu_enum_tables.js', [base_params, api_params]))
+ renders.append(
+ FileRender('webgpu_struct_info.json',
+ 'src/dawn/webgpu_struct_info.json',
+ [base_params, api_params]))
+ renders.append(
+ FileRender('library_webgpu_enum_tables.js',
+ 'src/dawn/library_webgpu_enum_tables.js',
+ [base_params, api_params]))
if 'mock_webgpu' in targets:
mock_params = [
- base_params,
- api_params,
- {
+ base_params, api_params, {
'has_callback_arguments': has_callback_arguments
}
]
- renders.append(FileRender('mock_webgpu.h', 'src/dawn/mock_webgpu.h', mock_params))
- renders.append(FileRender('mock_webgpu.cpp', 'src/dawn/mock_webgpu.cpp', mock_params))
+ renders.append(
+ FileRender('mock_webgpu.h', 'src/dawn/mock_webgpu.h',
+ mock_params))
+ renders.append(
+ FileRender('mock_webgpu.cpp', 'src/dawn/mock_webgpu.cpp',
+ mock_params))
if 'dawn_native_utils' in targets:
frontend_params = [
base_params,
api_params,
{
- 'as_frontendType': lambda typ: as_frontendType(typ), # TODO as_frontendType and friends take a Type and not a Name :(
- 'as_annotated_frontendType': lambda arg: annotated(as_frontendType(arg.type), arg)
+ # TODO: as_frontendType and co. take a Type, not a Name :(
+ 'as_frontendType': lambda typ: as_frontendType(typ),
+ 'as_annotated_frontendType': \
+ lambda arg: annotated(as_frontendType(arg.type), arg),
}
]
- renders.append(FileRender('dawn_native/ValidationUtils.h', 'src/dawn_native/ValidationUtils_autogen.h', frontend_params))
- renders.append(FileRender('dawn_native/ValidationUtils.cpp', 'src/dawn_native/ValidationUtils_autogen.cpp', frontend_params))
- renders.append(FileRender('dawn_native/wgpu_structs.h', 'src/dawn_native/wgpu_structs_autogen.h', frontend_params))
- renders.append(FileRender('dawn_native/wgpu_structs.cpp', 'src/dawn_native/wgpu_structs_autogen.cpp', frontend_params))
- renders.append(FileRender('dawn_native/ProcTable.cpp', 'src/dawn_native/ProcTable.cpp', frontend_params))
+ renders.append(
+ FileRender('dawn_native/ValidationUtils.h',
+ 'src/dawn_native/ValidationUtils_autogen.h',
+ frontend_params))
+ renders.append(
+ FileRender('dawn_native/ValidationUtils.cpp',
+ 'src/dawn_native/ValidationUtils_autogen.cpp',
+ frontend_params))
+ renders.append(
+ FileRender('dawn_native/wgpu_structs.h',
+ 'src/dawn_native/wgpu_structs_autogen.h',
+ frontend_params))
+ renders.append(
+ FileRender('dawn_native/wgpu_structs.cpp',
+ 'src/dawn_native/wgpu_structs_autogen.cpp',
+ frontend_params))
+ renders.append(
+ FileRender('dawn_native/ProcTable.cpp',
+ 'src/dawn_native/ProcTable.cpp', frontend_params))
if 'dawn_wire' in targets:
additional_params = compute_wire_params(api_params, wire_json)
wire_params = [
- base_params,
- api_params,
- {
+ base_params, api_params, {
'as_wireType': as_wireType,
- 'as_annotated_wireType': lambda arg: annotated(as_wireType(arg.type), arg),
- },
- additional_params
+ 'as_annotated_wireType': \
+ lambda arg: annotated(as_wireType(arg.type), arg),
+ }, additional_params
]
- renders.append(FileRender('dawn_wire/WireCmd.h', 'src/dawn_wire/WireCmd_autogen.h', wire_params))
- renders.append(FileRender('dawn_wire/WireCmd.cpp', 'src/dawn_wire/WireCmd_autogen.cpp', wire_params))
- renders.append(FileRender('dawn_wire/client/ApiObjects.h', 'src/dawn_wire/client/ApiObjects_autogen.h', wire_params))
- renders.append(FileRender('dawn_wire/client/ApiProcs.cpp', 'src/dawn_wire/client/ApiProcs_autogen.cpp', wire_params))
- renders.append(FileRender('dawn_wire/client/ApiProcs.h', 'src/dawn_wire/client/ApiProcs_autogen.h', wire_params))
- renders.append(FileRender('dawn_wire/client/ClientBase.h', 'src/dawn_wire/client/ClientBase_autogen.h', wire_params))
- renders.append(FileRender('dawn_wire/client/ClientHandlers.cpp', 'src/dawn_wire/client/ClientHandlers_autogen.cpp', wire_params))
- renders.append(FileRender('dawn_wire/client/ClientPrototypes.inc', 'src/dawn_wire/client/ClientPrototypes_autogen.inc', wire_params))
- renders.append(FileRender('dawn_wire/server/ServerBase.h', 'src/dawn_wire/server/ServerBase_autogen.h', wire_params))
- renders.append(FileRender('dawn_wire/server/ServerDoers.cpp', 'src/dawn_wire/server/ServerDoers_autogen.cpp', wire_params))
- renders.append(FileRender('dawn_wire/server/ServerHandlers.cpp', 'src/dawn_wire/server/ServerHandlers_autogen.cpp', wire_params))
- renders.append(FileRender('dawn_wire/server/ServerPrototypes.inc', 'src/dawn_wire/server/ServerPrototypes_autogen.inc', wire_params))
+ renders.append(
+ FileRender('dawn_wire/WireCmd.h',
+ 'src/dawn_wire/WireCmd_autogen.h', wire_params))
+ renders.append(
+ FileRender('dawn_wire/WireCmd.cpp',
+ 'src/dawn_wire/WireCmd_autogen.cpp', wire_params))
+ renders.append(
+ FileRender('dawn_wire/client/ApiObjects.h',
+ 'src/dawn_wire/client/ApiObjects_autogen.h',
+ wire_params))
+ renders.append(
+ FileRender('dawn_wire/client/ApiProcs.cpp',
+ 'src/dawn_wire/client/ApiProcs_autogen.cpp',
+ wire_params))
+ renders.append(
+ FileRender('dawn_wire/client/ClientBase.h',
+ 'src/dawn_wire/client/ClientBase_autogen.h',
+ wire_params))
+ renders.append(
+ FileRender('dawn_wire/client/ClientHandlers.cpp',
+ 'src/dawn_wire/client/ClientHandlers_autogen.cpp',
+ wire_params))
+ renders.append(
+ FileRender(
+ 'dawn_wire/client/ClientPrototypes.inc',
+ 'src/dawn_wire/client/ClientPrototypes_autogen.inc',
+ wire_params))
+ renders.append(
+ FileRender('dawn_wire/server/ServerBase.h',
+ 'src/dawn_wire/server/ServerBase_autogen.h',
+ wire_params))
+ renders.append(
+ FileRender('dawn_wire/server/ServerDoers.cpp',
+ 'src/dawn_wire/server/ServerDoers_autogen.cpp',
+ wire_params))
+ renders.append(
+ FileRender('dawn_wire/server/ServerHandlers.cpp',
+ 'src/dawn_wire/server/ServerHandlers_autogen.cpp',
+ wire_params))
+ renders.append(
+ FileRender(
+ 'dawn_wire/server/ServerPrototypes.inc',
+ 'src/dawn_wire/server/ServerPrototypes_autogen.inc',
+ wire_params))
return renders
@@ -644,5 +811,6 @@ class MultiGeneratorFromDawnJSON(Generator):
deps += [os.path.abspath(args.wire_json)]
return deps
+
if __name__ == '__main__':
sys.exit(run_generator(MultiGeneratorFromDawnJSON()))
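Most of the reflowed helpers in this file are pure naming-convention maps from the chunked Name to C and C++ identifiers. A distilled sketch of one of them (standalone, not the generator's code):

    def CamelCase(name):
        return ''.join(c[0].upper() + c[1:] for c in name.split(' '))

    def as_cMethod(type_name, method_name):
        return 'wgpu' + CamelCase(type_name) + CamelCase(method_name)

    print(as_cMethod('buffer', 'map async'))  # wgpuBufferMapAsync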
diff --git a/chromium/third_party/dawn/generator/generator_lib.gni b/chromium/third_party/dawn/generator/generator_lib.gni
index 36ac72f2f60..8b9e04cc955 100644
--- a/chromium/third_party/dawn/generator/generator_lib.gni
+++ b/chromium/third_party/dawn/generator/generator_lib.gni
@@ -135,9 +135,7 @@ template("generator_lib_action") {
# outputs are what's expected and write a depfile for Ninja.
action(_json_tarball_target) {
script = invoker.script
- outputs = [
- _json_tarball,
- ]
+ outputs = [ _json_tarball ]
depfile = _json_tarball_depfile
args = _generator_args
if (defined(invoker.deps)) {
@@ -153,12 +151,8 @@ template("generator_lib_action") {
rebase_path(_gen_dir, root_build_dir),
]
- deps = [
- ":${_json_tarball_target}",
- ]
- inputs = [
- _json_tarball,
- ]
+ deps = [ ":${_json_tarball_target}" ]
+ inputs = [ _json_tarball ]
# The expected output list is relative to the gen_dir but action
# target outputs are from the root dir so we need to rebase them.
diff --git a/chromium/third_party/dawn/generator/generator_lib.py b/chromium/third_party/dawn/generator/generator_lib.py
index 8deec5d1949..5e3734d7833 100644
--- a/chromium/third_party/dawn/generator/generator_lib.py
+++ b/chromium/third_party/dawn/generator/generator_lib.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Module to create generators that render multiple Jinja2 templates for GN.
A helper module that can be used to create generator scripts (clients)
@@ -54,6 +53,7 @@ from collections import namedtuple
#
FileRender = namedtuple('FileRender', ['template', 'output', 'params_dicts'])
+
# The interface that must be implemented by generators.
class Generator:
def get_description(self):
@@ -72,6 +72,7 @@ class Generator:
"""Return a list of extra input dependencies."""
return []
+
# Allow custom Jinja2 installation path through an additional python
# path from the arguments if present. This isn't done through the regular
# argparse because PreprocessingLoader uses jinja2 in the global scope before
@@ -92,6 +93,7 @@ except ValueError:
import jinja2
+
# A custom Jinja2 template loader that removes the extra indentation
# of the template blocks so that the output is correctly indented
class _PreprocessingLoader(jinja2.BaseLoader):
@@ -113,18 +115,21 @@ class _PreprocessingLoader(jinja2.BaseLoader):
def preprocess(self, source):
lines = source.split('\n')
- # Compute the current indentation level of the template blocks and remove their indentation
+ # Compute the current indentation level of the template blocks and
+ # remove their indentation
result = []
indentation_level = 0
- # Filter lines that are pure comments. line_comment_prefix is not enough because it removes
- # the comment but doesn't completely remove the line, resulting in more verbose output.
+ # Filter lines that are pure comments. line_comment_prefix is not
+ # enough because it removes the comment but doesn't completely remove
+ # the line, resulting in more verbose output.
lines = filter(lambda line: not line.strip().startswith('//*'), lines)
# Remove indentation templates have for the Jinja control flow.
for line in lines:
- # The capture in the regex adds one element per block start or end so we divide by two
- # there is also an extra line chunk corresponding to the line end, so we substract it.
+ # The capture in the regex adds one element per block start or end,
+ # so we divide by two. There is also an extra line chunk
+ # corresponding to the line end, so we subtract it.
numends = (len(self.blockend.split(line)) - 1) // 2
indentation_level -= numends
@@ -142,14 +147,19 @@ class _PreprocessingLoader(jinja2.BaseLoader):
elif line.startswith('\t'):
line = line[1:]
else:
- assert(line.strip() == '')
+ assert line.strip() == ''
return line
+
_FileOutput = namedtuple('FileOutput', ['name', 'content'])
+
def _do_renders(renders, template_dir):
loader = _PreprocessingLoader(template_dir)
- env = jinja2.Environment(loader=loader, lstrip_blocks=True, trim_blocks=True, line_comment_prefix='//*')
+ env = jinja2.Environment(loader=loader,
+ lstrip_blocks=True,
+ trim_blocks=True,
+ line_comment_prefix='//*')
def do_assert(expr):
assert expr
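The remove_indentation helper reformatted above strips one level of template indentation per enclosing Jinja block, so templates can be indented for readability without that indentation leaking into the generated file. Its rule, in isolation:

    def remove_indentation(line, level):
        # One level is four spaces or one tab; anything else must be blank.
        for _ in range(level):
            if line.startswith('    '):
                line = line[4:]
            elif line.startswith('\t'):
                line = line[1:]
            else:
                assert line.strip() == ''
        return line

    print(remove_indentation('        return obj;', 2))  # 'return obj;'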
@@ -177,16 +187,17 @@ def _do_renders(renders, template_dir):
return outputs
+
# Compute the list of imported, non-system Python modules.
# It assumes that any path outside of the root directory is system.
-def _compute_python_dependencies(root_dir = None):
+def _compute_python_dependencies(root_dir=None):
if not root_dir:
# Assume this script is under generator/ by default.
root_dir = os.path.join(os.path.dirname(__file__), os.pardir)
root_dir = os.path.abspath(root_dir)
module_paths = (module.__file__ for module in sys.modules.values()
- if module and hasattr(module, '__file__'))
+ if module and hasattr(module, '__file__'))
paths = set()
for path in module_paths:
@@ -203,37 +214,85 @@ def _compute_python_dependencies(root_dir = None):
return paths
+
def run_generator(generator):
parser = argparse.ArgumentParser(
- description = generator.get_description(),
- formatter_class = argparse.ArgumentDefaultsHelpFormatter,
+ description=generator.get_description(),
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
- generator.add_commandline_arguments(parser);
- parser.add_argument('--template-dir', default='templates', type=str, help='Directory with template files.')
- parser.add_argument(kJinja2Path, default=None, type=str, help='Additional python path to set before loading Jinja2')
- parser.add_argument('--output-json-tarball', default=None, type=str, help='Name of the "JSON tarball" to create (tar is too annoying to use in python).')
- parser.add_argument('--depfile', default=None, type=str, help='Name of the Ninja depfile to create for the JSON tarball')
- parser.add_argument('--expected-outputs-file', default=None, type=str, help="File to compare outputs with and fail if it doesn't match")
- parser.add_argument('--root-dir', default=None, type=str, help='Optional source root directory for Python dependency computations')
- parser.add_argument('--allowed-output-dirs-file', default=None, type=str, help="File containing a list of allowed directories where files can be output.")
- parser.add_argument('--print-cmake-dependencies', default=False, action="store_true", help="Prints a semi-colon separated list of dependencies to stdout and exits.")
- parser.add_argument('--print-cmake-outputs', default=False, action="store_true", help="Prints a semi-colon separated list of outputs to stdout and exits.")
- parser.add_argument('--output-dir', default=None, type=str, help='Directory where to output generate files.')
+ generator.add_commandline_arguments(parser)
+ parser.add_argument('--template-dir',
+ default='templates',
+ type=str,
+ help='Directory with template files.')
+ parser.add_argument(
+ kJinja2Path,
+ default=None,
+ type=str,
+ help='Additional python path to set before loading Jinja2')
+ parser.add_argument(
+ '--output-json-tarball',
+ default=None,
+ type=str,
+ help=('Name of the "JSON tarball" to create (tar is too annoying '
+ 'to use in python).'))
+ parser.add_argument(
+ '--depfile',
+ default=None,
+ type=str,
+ help='Name of the Ninja depfile to create for the JSON tarball')
+ parser.add_argument(
+ '--expected-outputs-file',
+ default=None,
+ type=str,
+ help="File to compare outputs with and fail if it doesn't match")
+ parser.add_argument(
+ '--root-dir',
+ default=None,
+ type=str,
+ help=('Optional source root directory for Python dependency '
+ 'computations'))
+ parser.add_argument(
+ '--allowed-output-dirs-file',
+ default=None,
+ type=str,
+ help=("File containing a list of allowed directories where files "
+ "can be output."))
+ parser.add_argument(
+ '--print-cmake-dependencies',
+ default=False,
+ action="store_true",
+ help=("Prints a semi-colon separated list of dependencies to "
+ "stdout and exits."))
+ parser.add_argument(
+ '--print-cmake-outputs',
+ default=False,
+ action="store_true",
+ help=("Prints a semi-colon separated list of outputs to "
+ "stdout and exits."))
+ parser.add_argument('--output-dir',
+ default=None,
+ type=str,
+ help='Directory where to output generated files.')
args = parser.parse_args()
- renders = generator.get_file_renders(args);
+ renders = generator.get_file_renders(args)
# Output a list of all dependencies for CMake or the tarball for GN/Ninja.
if args.depfile != None or args.print_cmake_dependencies:
dependencies = generator.get_dependencies(args)
- dependencies += [args.template_dir + os.path.sep + render.template for render in renders]
+ dependencies += [
+ args.template_dir + os.path.sep + render.template
+ for render in renders
+ ]
dependencies += _compute_python_dependencies(args.root_dir)
if args.depfile != None:
with open(args.depfile, 'w') as f:
- f.write(args.output_json_tarball + ": " + " ".join(dependencies))
+ f.write(args.output_json_tarball + ": " +
+ " ".join(dependencies))
if args.print_cmake_dependencies:
sys.stdout.write(";".join(dependencies))
@@ -248,33 +307,42 @@ def run_generator(generator):
actual = {render.output for render in renders}
if actual != expected:
- print("Wrong expected outputs, caller expected:\n " + repr(sorted(expected)))
+ print("Wrong expected outputs, caller expected:\n " +
+ repr(sorted(expected)))
print("Actual output:\n " + repr(sorted(actual)))
return 1
# Print the list of all the outputs for cmake.
if args.print_cmake_outputs:
- sys.stdout.write(";".join([os.path.join(args.output_dir, render.output) for render in renders]))
+ sys.stdout.write(";".join([
+ os.path.join(args.output_dir, render.output) for render in renders
+ ]))
return 0
outputs = _do_renders(renders, args.template_dir)
- # The caller wants to assert that the outputs are only in specific directories.
+ # The caller wants to assert that the outputs are only in specific
+ # directories.
if args.allowed_output_dirs_file != None:
with open(args.allowed_output_dirs_file) as f:
allowed_dirs = set([line.strip() for line in f.readlines()])
for directory in allowed_dirs:
if not directory.endswith('/'):
- print('Allowed directory entry "{}" doesn\'t end with /'.format(directory))
+ print('Allowed directory entry "{}" doesn\'t '
+ 'end with /'.format(directory))
return 1
def check_in_subdirectory(path, directory):
- return path.startswith(directory) and not '/' in path[len(directory):]
+ return path.startswith(
+ directory) and not '/' in path[len(directory):]
for render in renders:
- if not any(check_in_subdirectory(render.output, directory) for directory in allowed_dirs):
- print('Output file "{}" is not in the allowed directory list below:'.format(render.output))
+ if not any(
+ check_in_subdirectory(render.output, directory)
+ for directory in allowed_dirs):
+ print('Output file "{}" is not in the allowed directory '
+ 'list below:'.format(render.output))
for directory in sorted(allowed_dirs):
print(' "{}"'.format(directory))
return 1
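The directory check reformatted above is deliberately strict: a rendered file passes only if it is a direct child of an allowed directory, which is why every allowed entry must end in '/'. The predicate in isolation:

    def check_in_subdirectory(path, directory):
        return path.startswith(directory) and '/' not in path[len(directory):]

    assert check_in_subdirectory('src/dawn/webgpu.h', 'src/dawn/')
    assert not check_in_subdirectory('src/dawn/sub/x.h', 'src/dawn/')
    assert not check_in_subdirectory('src/dawn2/x.h', 'src/dawn/')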
diff --git a/chromium/third_party/dawn/generator/opengl_loader_generator.py b/chromium/third_party/dawn/generator/opengl_loader_generator.py
index 47721e2d4fa..e080c4e800b 100644
--- a/chromium/third_party/dawn/generator/opengl_loader_generator.py
+++ b/chromium/third_party/dawn/generator/opengl_loader_generator.py
@@ -19,9 +19,10 @@ import xml.etree.ElementTree as etree
from generator_lib import Generator, run_generator, FileRender
+
class ProcName:
def __init__(self, gl_name, proc_name=None):
- assert(gl_name.startswith('gl'))
+ assert gl_name.startswith('gl')
if proc_name == None:
proc_name = gl_name[2:]
@@ -40,7 +41,10 @@ class ProcName:
def __repr__(self):
return 'Proc("{}", "{}")'.format(self.gl_name, self.proc_name)
+
ProcParam = namedtuple('ProcParam', ['name', 'type'])
+
+
class Proc:
def __init__(self, element):
# Type declaration for return values and arguments all have the same
@@ -72,7 +76,9 @@ class Proc:
self.params = []
for param in element.findall('./param'):
- self.params.append(ProcParam(param.find('name').text, parse_type_declaration(param)))
+ self.params.append(
+ ProcParam(
+ param.find('name').text, parse_type_declaration(param)))
self.gl_name = proto.find('name').text
self.alias = None
@@ -83,7 +89,7 @@ class Proc:
return self.gl_name
def ProcName(self):
- assert(self.gl_name.startswith('gl'))
+ assert self.gl_name.startswith('gl')
return self.gl_name[2:]
def PFNGLPROCNAME(self):
@@ -92,11 +98,14 @@ class Proc:
def __repr__(self):
return 'Proc("{}")'.format(self.gl_name)
+
EnumDefine = namedtuple('EnumDefine', ['name', 'value'])
Version = namedtuple('Version', ['major', 'minor'])
VersionBlock = namedtuple('VersionBlock', ['version', 'procs', 'enums'])
HeaderBlock = namedtuple('HeaderBlock', ['description', 'procs', 'enums'])
-ExtensionBlock = namedtuple('ExtensionBlock', ['extension', 'procs', 'enums', 'supported_specs'])
+ExtensionBlock = namedtuple('ExtensionBlock',
+ ['extension', 'procs', 'enums', 'supported_specs'])
+
def parse_version(version):
return Version(*map(int, version.split('.')))
@@ -107,7 +116,7 @@ def compute_params(root, supported_extensions):
all_procs = {}
for command in root.findall('''commands[@namespace='GL']/command'''):
proc = Proc(command)
- assert(proc.gl_name not in all_procs)
+ assert proc.gl_name not in all_procs
all_procs[proc.gl_name] = proc
all_enums = {}
@@ -117,7 +126,7 @@ def compute_params(root, supported_extensions):
if enum_name == 'GL_ACTIVE_PROGRAM_EXT':
continue
- assert(enum_name not in all_enums)
+ assert enum_name not in all_enums
all_enums[enum_name] = EnumDefine(enum_name, enum.attrib['value'])
# Get the list of all Desktop OpenGL function removed by the Core Profile.
@@ -126,13 +135,13 @@ def compute_params(root, supported_extensions):
core_removed_procs.add(proc.attrib['name'])
# Get list of enums and procs per OpenGL ES/Desktop OpenGL version
- def parse_version_blocks(api, removed_procs = set()):
+ def parse_version_blocks(api, removed_procs=set()):
blocks = []
for section in root.findall('''feature[@api='{}']'''.format(api)):
section_procs = []
for command in section.findall('./require/command'):
proc_name = command.attrib['name']
- assert(all_procs[proc_name].alias == None)
+ assert all_procs[proc_name].alias == None
if proc_name not in removed_procs:
section_procs.append(all_procs[proc_name])
@@ -140,7 +149,9 @@ def compute_params(root, supported_extensions):
for enum in section.findall('./require/enum'):
section_enums.append(all_enums[enum.attrib['name']])
- blocks.append(VersionBlock(parse_version(section.attrib['number']), section_procs, section_enums))
+ blocks.append(
+ VersionBlock(parse_version(section.attrib['number']),
+ section_procs, section_enums))
return blocks
@@ -148,12 +159,13 @@ def compute_params(root, supported_extensions):
desktop_gl_blocks = parse_version_blocks('gl', core_removed_procs)
def parse_extension_block(extension):
- section = root.find('''extensions/extension[@name='{}']'''.format(extension))
+ section = root.find(
+ '''extensions/extension[@name='{}']'''.format(extension))
supported_specs = section.attrib['supported'].split('|')
section_procs = []
for command in section.findall('./require/command'):
proc_name = command.attrib['name']
- assert(all_procs[proc_name].alias == None)
+ assert all_procs[proc_name].alias == None
if proc_name not in removed_procs:
section_procs.append(all_procs[proc_name])
@@ -161,10 +173,11 @@ def compute_params(root, supported_extensions):
for enum in section.findall('./require/enum'):
section_enums.append(all_enums[enum.attrib['name']])
- return ExtensionBlock(extension, section_procs, section_enums, supported_specs)
+ return ExtensionBlock(extension, section_procs, section_enums,
+ supported_specs)
- extension_desktop_gl_blocks = [];
- extension_gles_blocks = [];
+ extension_desktop_gl_blocks = []
+ extension_gles_blocks = []
for extension in supported_extensions:
extension_block = parse_extension_block(extension)
if 'gl' in extension_block.supported_specs:
@@ -176,6 +189,7 @@ def compute_params(root, supported_extensions):
already_added_header_procs = set()
already_added_header_enums = set()
header_blocks = []
+
def add_header_block(description, block):
block_procs = []
for proc in block.procs:
@@ -190,13 +204,18 @@ def compute_params(root, supported_extensions):
block_enums.append(enum)
if len(block_procs) > 0 or len(block_enums) > 0:
- header_blocks.append(HeaderBlock(description, block_procs, block_enums))
+ header_blocks.append(
+ HeaderBlock(description, block_procs, block_enums))
for block in gles_blocks:
- add_header_block('OpenGL ES {}.{}'.format(block.version.major, block.version.minor), block)
+ add_header_block(
+ 'OpenGL ES {}.{}'.format(block.version.major, block.version.minor),
+ block)
for block in desktop_gl_blocks:
- add_header_block('Desktop OpenGL {}.{}'.format(block.version.major, block.version.minor), block)
+ add_header_block(
+ 'Desktop OpenGL {}.{}'.format(block.version.major,
+ block.version.minor), block)
for block in extension_desktop_gl_blocks:
add_header_block(block.extension, block)
@@ -212,30 +231,50 @@ def compute_params(root, supported_extensions):
'header_blocks': header_blocks,
}
+
class OpenGLLoaderGenerator(Generator):
def get_description(self):
return 'Generates code to load OpenGL function pointers'
def add_commandline_arguments(self, parser):
- parser.add_argument('--gl-xml', required=True, type=str, help='The Khronos gl.xml to use.')
- parser.add_argument('--supported-extensions', required=True, type=str, help ='The JSON file that defines the OpenGL and GLES extensions to use.')
+ parser.add_argument('--gl-xml',
+ required=True,
+ type=str,
+ help='The Khronos gl.xml to use.')
+ parser.add_argument(
+ '--supported-extensions',
+ required=True,
+ type=str,
+ help=
+ 'The JSON file that defines the OpenGL and GLES extensions to use.'
+ )
def get_file_renders(self, args):
supported_extensions = []
with open(args.supported_extensions) as f:
supported_extensions_json = json.loads(f.read())
- supported_extensions = supported_extensions_json['supported_extensions']
+ supported_extensions = supported_extensions_json[
+ 'supported_extensions']
- params = compute_params(etree.parse(args.gl_xml).getroot(), supported_extensions)
+ params = compute_params(
+ etree.parse(args.gl_xml).getroot(), supported_extensions)
return [
- FileRender('opengl/OpenGLFunctionsBase.cpp', 'src/dawn_native/opengl/OpenGLFunctionsBase_autogen.cpp', [params]),
- FileRender('opengl/OpenGLFunctionsBase.h', 'src/dawn_native/opengl/OpenGLFunctionsBase_autogen.h', [params]),
- FileRender('opengl/opengl_platform.h', 'src/dawn_native/opengl/opengl_platform_autogen.h', [params]),
+ FileRender(
+ 'opengl/OpenGLFunctionsBase.cpp',
+ 'src/dawn_native/opengl/OpenGLFunctionsBase_autogen.cpp',
+ [params]),
+ FileRender('opengl/OpenGLFunctionsBase.h',
+ 'src/dawn_native/opengl/OpenGLFunctionsBase_autogen.h',
+ [params]),
+ FileRender('opengl/opengl_platform.h',
+ 'src/dawn_native/opengl/opengl_platform_autogen.h',
+ [params]),
]
def get_dependencies(self, args):
return [os.path.abspath(args.gl_xml)]
+
if __name__ == '__main__':
sys.exit(run_generator(OpenGLLoaderGenerator()))
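compute_params walks the Khronos gl.xml registry with ElementTree XPath queries: feature elements keyed by api give the version blocks, and extension blocks record which specs support them. A minimal sketch of that traversal (assumes a local copy of gl.xml):

    import xml.etree.ElementTree as etree

    root = etree.parse('gl.xml').getroot()
    for feature in root.findall("feature[@api='gles2']"):
        procs = [c.attrib['name']
                 for c in feature.findall('./require/command')]
        print(feature.attrib['number'], len(procs), 'commands')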
diff --git a/chromium/third_party/dawn/generator/remove_files.py b/chromium/third_party/dawn/generator/remove_files.py
index 0b8b5216145..6ddf463667d 100644
--- a/chromium/third_party/dawn/generator/remove_files.py
+++ b/chromium/third_party/dawn/generator/remove_files.py
@@ -15,11 +15,15 @@
import argparse, glob, os, sys
+
def check_in_subdirectory(path, directory):
return path.startswith(directory) and not '/' in path[len(directory):]
+
def check_is_allowed(path, allowed_dirs):
- return any(check_in_subdirectory(path, directory) for directory in allowed_dirs)
+ return any(
+ check_in_subdirectory(path, directory) for directory in allowed_dirs)
+
def get_all_files_in_dir(find_directory):
result = []
@@ -27,15 +31,28 @@ def get_all_files_in_dir(find_directory):
result += [os.path.join(directory, filename) for filename in files]
return result
+
def run():
# Parse command line arguments
parser = argparse.ArgumentParser(
- description = "Removes stale autogenerated files from gen/ directories."
+ description="Removes stale autogenerated files from gen/ directories.")
+ parser.add_argument(
+ '--root-dir',
+ type=str,
+ help='The root directory, all other paths in files are relative to it.'
)
- parser.add_argument('--root-dir', type=str, help='The root directory, all other paths in files are relative to it.')
- parser.add_argument('--allowed-output-dirs-file', type=str, help='The file containing a list of allowed directories')
- parser.add_argument('--stale-dirs-file', type=str, help='The file containing a list of directories to check for stale files')
- parser.add_argument('--stamp', type=str, help='A stamp written once this script completes')
+ parser.add_argument(
+ '--allowed-output-dirs-file',
+ type=str,
+ help='The file containing a list of allowed directories')
+ parser.add_argument(
+ '--stale-dirs-file',
+ type=str,
+ help=
+ 'The file containing a list of directories to check for stale files')
+ parser.add_argument('--stamp',
+ type=str,
+ help='A stamp written once this script completes')
args = parser.parse_args()
root_dir = args.root_dir
@@ -43,11 +60,13 @@ def run():
# Load the list of allowed and stale directories
with open(args.allowed_output_dirs_file) as f:
- allowed_dirs = set([os.path.join(root_dir, line.strip()) for line in f.readlines()])
+ allowed_dirs = set(
+ [os.path.join(root_dir, line.strip()) for line in f.readlines()])
for directory in allowed_dirs:
if not directory.endswith('/'):
- print('Allowed directory entry "{}" doesn\'t end with /'.format(directory))
+ print('Allowed directory entry "{}" doesn\'t end with /'.format(
+ directory))
return 1
with open(args.stale_dirs_file) as f:
@@ -67,5 +86,6 @@ def run():
return 0
+
if __name__ == "__main__":
sys.exit(run())
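Put together, the script's sweep is: list every file under each stale directory, keep the ones directly inside an allowed directory, and delete the rest. The same logic as a dry-run sketch (paths illustrative):

    import os

    def in_subdirectory(path, directory):
        return path.startswith(directory) and '/' not in path[len(directory):]

    allowed_dirs = {'gen/dawn/src/dawn/'}  # would come from the list file
    for directory, _, files in os.walk('gen/dawn'):
        for name in files:
            path = os.path.join(directory, name)
            if not any(in_subdirectory(path, d) for d in allowed_dirs):
                print('stale:', path)  # the real script removes the file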
diff --git a/chromium/third_party/dawn/generator/templates/.clang-format b/chromium/third_party/dawn/generator/templates/.clang-format
new file mode 100644
index 00000000000..9d159247d51
--- /dev/null
+++ b/chromium/third_party/dawn/generator/templates/.clang-format
@@ -0,0 +1,2 @@
+DisableFormat: true
+SortIncludes: false
diff --git a/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiObjects.h b/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiObjects.h
index 5c2ae3f613c..288c7004de1 100644
--- a/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiObjects.h
+++ b/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiObjects.h
@@ -16,10 +16,24 @@
#define DAWNWIRE_CLIENT_APIOBJECTS_AUTOGEN_H_
namespace dawn_wire { namespace client {
- {% for type in by_category["object"] if not type.name.CamelCase() in client_special_objects %}
- struct {{type.name.CamelCase()}} : ObjectBase {
- using ObjectBase::ObjectBase;
- };
+
+ {% for type in by_category["object"] %}
+ {% set Type = type.name.CamelCase() %}
+ {% if type.name.CamelCase() in client_special_objects %}
+ class {{Type}};
+ {% else %}
+ struct {{type.name.CamelCase()}} : ObjectBase {
+ using ObjectBase::ObjectBase;
+ };
+ {% endif %}
+
+ inline {{Type}}* FromAPI(WGPU{{Type}} obj) {
+ return reinterpret_cast<{{Type}}*>(obj);
+ }
+ inline WGPU{{Type}} ToAPI({{Type}}* obj) {
+ return reinterpret_cast<WGPU{{Type}}>(obj);
+ }
+
{% endfor %}
}} // namespace dawn_wire::client
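The template above now emits a FromAPI/ToAPI cast pair for every object type. As a reminder of how these Jinja2 templates expand, a toy render (simplified; the real loader also strips block indentation):

    import jinja2

    template = jinja2.Template(
        '{% for type in types %}'
        'inline {{type}}* FromAPI(WGPU{{type}} obj);\n'
        '{% endfor %}')
    print(template.render(types=['Buffer', 'Queue']))
    # inline Buffer* FromAPI(WGPUBuffer obj);
    # inline Queue* FromAPI(WGPUQueue obj);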
diff --git a/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiProcs.cpp b/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiProcs.cpp
index a604a7f2a57..3edba1a22b2 100644
--- a/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiProcs.cpp
+++ b/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiProcs.cpp
@@ -14,7 +14,6 @@
#include "common/Log.h"
#include "dawn_wire/client/ApiObjects.h"
-#include "dawn_wire/client/ApiProcs_autogen.h"
#include "dawn_wire/client/Client.h"
#include <algorithm>
@@ -157,9 +156,8 @@ namespace dawn_wire { namespace client {
} while (false);
if (DAWN_UNLIKELY(!sameDevice)) {
- ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(device),
- WGPUErrorType_Validation,
- "All objects must be from the same device.");
+ device->InjectError(WGPUErrorType_Validation,
+ "All objects must be from the same device.");
{% if method.return_type.category == "object" %}
// Allocate an object without registering it on the server. This is backed by a real allocation on
// the client so commands can be sent with it. But because it's not allocated on the server, it will
@@ -176,8 +174,8 @@ namespace dawn_wire { namespace client {
}
{% endif %}
+ auto self = reinterpret_cast<{{as_wireType(type)}}>(cSelf);
{% if Suffix not in client_handwritten_commands %}
- auto self = reinterpret_cast<{{as_wireType(type)}}>(cSelf);
Device* device = self->device;
{{Suffix}}Cmd cmd;
@@ -202,9 +200,9 @@ namespace dawn_wire { namespace client {
return reinterpret_cast<{{as_cType(method.return_type.name)}}>(allocation->object.get());
{% endif %}
{% else %}
- return ClientHandwritten{{Suffix}}(cSelf
+ return self->{{method.name.CamelCase()}}(
{%- for arg in method.arguments -%}
- , {{as_varName(arg.name)}}
+ {%if not loop.first %}, {% endif %} {{as_varName(arg.name)}}
{%- endfor -%});
{% endif %}
}
@@ -241,6 +239,12 @@ namespace dawn_wire { namespace client {
return nullptr;
}
+ void ClientDeviceReference(WGPUDevice) {
+ }
+
+ void ClientDeviceRelease(WGPUDevice) {
+ }
+
struct ProcEntry {
WGPUProc proc;
const char* name;
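A rough sketch of the same-device check this hunk rewrites, using simplified stand-in types (Device, ObjectBase, and the InjectError signature here are illustrative, not the generated code):

```cpp
#include <initializer_list>

enum WGPUErrorType { WGPUErrorType_Validation };  // stand-in for the real enum

struct Device {
    // In the real client this sends an error over the wire; here we only
    // record the message so the sketch is self-contained.
    void InjectError(WGPUErrorType, const char* message) {
        lastError = message;
    }
    const char* lastError = nullptr;
};

struct ObjectBase {
    Device* device;
};

// Returns true when every non-null object belongs to `device`; otherwise
// injects a validation error, mirroring the generated code's behavior.
bool CheckSameDevice(Device* device, std::initializer_list<const ObjectBase*> objects) {
    for (const ObjectBase* object : objects) {
        if (object != nullptr && object->device != device) {
            device->InjectError(WGPUErrorType_Validation,
                                "All objects must be from the same device.");
            return false;
        }
    }
    return true;
}
```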
diff --git a/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiProcs.h b/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiProcs.h
deleted file mode 100644
index 2a27c3cc8e3..00000000000
--- a/chromium/third_party/dawn/generator/templates/dawn_wire/client/ApiProcs.h
+++ /dev/null
@@ -1,41 +0,0 @@
-//* Copyright 2019 The Dawn Authors
-//*
-//* Licensed under the Apache License, Version 2.0 (the "License");
-//* you may not use this file except in compliance with the License.
-//* You may obtain a copy of the License at
-//*
-//* http://www.apache.org/licenses/LICENSE-2.0
-//*
-//* Unless required by applicable law or agreed to in writing, software
-//* distributed under the License is distributed on an "AS IS" BASIS,
-//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//* See the License for the specific language governing permissions and
-//* limitations under the License.
-
-#ifndef DAWNWIRE_CLIENT_APIPROCS_AUTOGEN_H_
-#define DAWNWIRE_CLIENT_APIPROCS_AUTOGEN_H_
-
-#include <dawn/webgpu.h>
-
-namespace dawn_wire { namespace client {
-
- //* Dawn API
- {% for type in by_category["object"] %}
- {% set cType = as_cType(type.name) %}
- {% for method in c_methods(type) %}
- {% set Suffix = as_MethodSuffix(type.name, method.name) %}
- {% if Suffix in client_handwritten_commands %}
- {% set Suffix = "Handwritten" + Suffix %}
- {% endif %}
- {{as_cType(method.return_type.name)}} Client{{Suffix}}(
- {{-cType}} cSelf
- {%- for arg in method.arguments -%}
- , {{as_annotated_cType(arg)}}
- {%- endfor -%}
- );
- {% endfor %}
- {% endfor %}
-
-}} // namespace dawn_wire::client
-
-#endif // DAWNWIRE_CLIENT_APIPROCS_AUTOGEN_H_
diff --git a/chromium/third_party/dawn/generator/templates/mock_webgpu.cpp b/chromium/third_party/dawn/generator/templates/mock_webgpu.cpp
index 6b8c5a6863c..0abfe9a93b4 100644
--- a/chromium/third_party/dawn/generator/templates/mock_webgpu.cpp
+++ b/chromium/third_party/dawn/generator/templates/mock_webgpu.cpp
@@ -96,6 +96,19 @@ void ProcTableAsClass::BufferMapWriteAsync(WGPUBuffer self,
OnBufferMapWriteAsyncCallback(self, callback, userdata);
}
+void ProcTableAsClass::BufferMapAsync(WGPUBuffer self,
+ WGPUMapModeFlags mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata) {
+ auto object = reinterpret_cast<ProcTableAsClass::Object*>(self);
+ object->mapAsyncCallback = callback;
+ object->userdata = userdata;
+
+ OnBufferMapAsyncCallback(self, callback, userdata);
+}
+
void ProcTableAsClass::FenceOnCompletion(WGPUFence self,
uint64_t value,
WGPUFenceOnCompletionCallback callback,
@@ -135,6 +148,11 @@ void ProcTableAsClass::CallMapWriteCallback(WGPUBuffer buffer,
object->mapWriteCallback(status, data, dataLength, object->userdata);
}
+void ProcTableAsClass::CallMapAsyncCallback(WGPUBuffer buffer, WGPUBufferMapAsyncStatus status) {
+ auto object = reinterpret_cast<ProcTableAsClass::Object*>(buffer);
+ object->mapAsyncCallback(status, object->userdata);
+}
+
void ProcTableAsClass::CallFenceOnCompletionCallback(WGPUFence fence,
WGPUFenceCompletionStatus status) {
auto object = reinterpret_cast<ProcTableAsClass::Object*>(fence);
diff --git a/chromium/third_party/dawn/generator/templates/mock_webgpu.h b/chromium/third_party/dawn/generator/templates/mock_webgpu.h
index 78b9b78ef30..0a44248e004 100644
--- a/chromium/third_party/dawn/generator/templates/mock_webgpu.h
+++ b/chromium/third_party/dawn/generator/templates/mock_webgpu.h
@@ -65,6 +65,12 @@ class ProcTableAsClass {
void BufferMapWriteAsync(WGPUBuffer self,
WGPUBufferMapWriteCallback callback,
void* userdata);
+ void BufferMapAsync(WGPUBuffer self,
+ WGPUMapModeFlags mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata);
void FenceOnCompletion(WGPUFence self,
uint64_t value,
WGPUFenceOnCompletionCallback callback,
@@ -86,6 +92,9 @@ class ProcTableAsClass {
virtual void OnBufferMapWriteAsyncCallback(WGPUBuffer buffer,
WGPUBufferMapWriteCallback callback,
void* userdata) = 0;
+ virtual void OnBufferMapAsyncCallback(WGPUBuffer buffer,
+ WGPUBufferMapCallback callback,
+ void* userdata) = 0;
virtual void OnFenceOnCompletionCallback(WGPUFence fence,
uint64_t value,
WGPUFenceOnCompletionCallback callback,
@@ -96,6 +105,7 @@ class ProcTableAsClass {
void CallDeviceLostCallback(WGPUDevice device, const char* message);
void CallMapReadCallback(WGPUBuffer buffer, WGPUBufferMapAsyncStatus status, const void* data, uint64_t dataLength);
void CallMapWriteCallback(WGPUBuffer buffer, WGPUBufferMapAsyncStatus status, void* data, uint64_t dataLength);
+ void CallMapAsyncCallback(WGPUBuffer buffer, WGPUBufferMapAsyncStatus status);
void CallFenceOnCompletionCallback(WGPUFence fence, WGPUFenceCompletionStatus status);
struct Object {
@@ -104,6 +114,7 @@ class ProcTableAsClass {
WGPUDeviceLostCallback deviceLostCallback = nullptr;
WGPUBufferMapReadCallback mapReadCallback = nullptr;
WGPUBufferMapWriteCallback mapWriteCallback = nullptr;
+ WGPUBufferMapCallback mapAsyncCallback = nullptr;
WGPUFenceOnCompletionCallback fenceOnCompletionCallback = nullptr;
void* userdata = 0;
};
@@ -140,6 +151,10 @@ class MockProcTable : public ProcTableAsClass {
MOCK_METHOD(bool, OnDevicePopErrorScopeCallback, (WGPUDevice device, WGPUErrorCallback callback, void* userdata), (override));
MOCK_METHOD(void, OnBufferMapReadAsyncCallback, (WGPUBuffer buffer, WGPUBufferMapReadCallback callback, void* userdata), (override));
MOCK_METHOD(void, OnBufferMapWriteAsyncCallback, (WGPUBuffer buffer, WGPUBufferMapWriteCallback callback, void* userdata), (override));
+ MOCK_METHOD(void,
+ OnBufferMapAsyncCallback,
+ (WGPUBuffer buffer, WGPUBufferMapCallback callback, void* userdata),
+ (override));
MOCK_METHOD(void, OnFenceOnCompletionCallback, (WGPUFence fence, uint64_t value, WGPUFenceOnCompletionCallback callback, void* userdata), (override));
};
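Reduced to its essentials, the mock's capture-then-fire pattern looks like this (types are simplified stand-ins; only the callback signature matches webgpu.h):

```cpp
enum WGPUBufferMapAsyncStatus { WGPUBufferMapAsyncStatus_Success };
typedef void (*WGPUBufferMapCallback)(WGPUBufferMapAsyncStatus status, void* userdata);

struct MockObject {
    WGPUBufferMapCallback mapAsyncCallback = nullptr;
    void* userdata = nullptr;
};

// The proc does no real mapping; it only records the callback and userdata.
void BufferMapAsync(MockObject* self, WGPUBufferMapCallback callback, void* userdata) {
    self->mapAsyncCallback = callback;
    self->userdata = userdata;
}

// The test decides when the callback fires and with which status.
void CallMapAsyncCallback(MockObject* self, WGPUBufferMapAsyncStatus status) {
    self->mapAsyncCallback(status, self->userdata);
}
```

This lets tests assert that the callback is invoked exactly once and with the expected status, without doing any GPU work.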
diff --git a/chromium/third_party/dawn/generator/templates/webgpu.h b/chromium/third_party/dawn/generator/templates/webgpu.h
index 4e1c7c8ef34..ec9db20519a 100644
--- a/chromium/third_party/dawn/generator/templates/webgpu.h
+++ b/chromium/third_party/dawn/generator/templates/webgpu.h
@@ -93,6 +93,8 @@ typedef uint32_t WGPUFlags;
{% endif %}
{% endfor %}
+//* TODO(dawn:22) remove this once the PSA is sent and the deadline has passed.
+#define WGPUTextureFormat_RG11B10Float WGPUTextureFormat_RG11B10Ufloat
typedef struct WGPUChainedStruct {
struct WGPUChainedStruct const * next;
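A compile-time illustration of what the compatibility alias above provides (the enumerator value below is made up; only the aliasing matters):

```cpp
// Old and new spellings name the same enumerator, so code written against the
// old name keeps compiling while callers migrate.
enum WGPUTextureFormat { WGPUTextureFormat_RG11B10Ufloat = 26 };  // illustrative value
#define WGPUTextureFormat_RG11B10Float WGPUTextureFormat_RG11B10Ufloat

static_assert(WGPUTextureFormat_RG11B10Float == WGPUTextureFormat_RG11B10Ufloat,
              "both names must refer to the same format");
```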
diff --git a/chromium/third_party/dawn/generator/templates/webgpu_cpp.cpp b/chromium/third_party/dawn/generator/templates/webgpu_cpp.cpp
index 7d22f640af4..be17a5d6d82 100644
--- a/chromium/third_party/dawn/generator/templates/webgpu_cpp.cpp
+++ b/chromium/third_party/dawn/generator/templates/webgpu_cpp.cpp
@@ -11,37 +11,39 @@
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
-
#include "dawn/webgpu_cpp.h"
namespace wgpu {
-
{% for type in by_category["enum"] %}
{% set CppType = as_cppType(type.name) %}
{% set CType = as_cType(type.name) %}
+ // {{CppType}}
+
static_assert(sizeof({{CppType}}) == sizeof({{CType}}), "sizeof mismatch for {{CppType}}");
static_assert(alignof({{CppType}}) == alignof({{CType}}), "alignof mismatch for {{CppType}}");
{% for value in type.values %}
static_assert(static_cast<uint32_t>({{CppType}}::{{as_cppEnum(value.name)}}) == {{as_cEnum(type.name, value.name)}}, "value mismatch for {{CppType}}::{{as_cppEnum(value.name)}}");
{% endfor %}
-
- {% endfor %}
+ {% endfor -%}
{% for type in by_category["bitmask"] %}
{% set CppType = as_cppType(type.name) %}
{% set CType = as_cType(type.name) + "Flags" %}
+ // {{CppType}}
+
static_assert(sizeof({{CppType}}) == sizeof({{CType}}), "sizeof mismatch for {{CppType}}");
static_assert(alignof({{CppType}}) == alignof({{CType}}), "alignof mismatch for {{CppType}}");
{% for value in type.values %}
static_assert(static_cast<uint32_t>({{CppType}}::{{as_cppEnum(value.name)}}) == {{as_cEnum(type.name, value.name)}}, "value mismatch for {{CppType}}::{{as_cppEnum(value.name)}}");
{% endfor %}
-
{% endfor %}
+ // ChainedStruct
+
static_assert(sizeof(ChainedStruct) == sizeof(WGPUChainedStruct),
"sizeof mismatch for ChainedStruct");
static_assert(alignof(ChainedStruct) == alignof(WGPUChainedStruct),
@@ -50,11 +52,12 @@ namespace wgpu {
"offsetof mismatch for ChainedStruct::nextInChain");
static_assert(offsetof(ChainedStruct, sType) == offsetof(WGPUChainedStruct, sType),
"offsetof mismatch for ChainedStruct::sType");
-
{% for type in by_category["structure"] %}
{% set CppType = as_cppType(type.name) %}
{% set CType = as_cType(type.name) %}
+ // {{CppType}}
+
static_assert(sizeof({{CppType}}) == sizeof({{CType}}), "sizeof mismatch for {{CppType}}");
static_assert(alignof({{CppType}}) == alignof({{CType}}), "alignof mismatch for {{CppType}}");
@@ -67,17 +70,18 @@ namespace wgpu {
static_assert(offsetof({{CppType}}, {{memberName}}) == offsetof({{CType}}, {{memberName}}),
"offsetof mismatch for {{CppType}}::{{memberName}}");
{% endfor %}
-
- {% endfor %}
+ {% endfor -%}
{% for type in by_category["object"] %}
{% set CppType = as_cppType(type.name) %}
{% set CType = as_cType(type.name) %}
+ // {{CppType}}
+
static_assert(sizeof({{CppType}}) == sizeof({{CType}}), "sizeof mismatch for {{CppType}}");
static_assert(alignof({{CppType}}) == alignof({{CType}}), "alignof mismatch for {{CppType}}");
- {% macro render_cpp_method_declaration(type, method) %}
+ {% macro render_cpp_method_declaration(type, method) -%}
{% set CppType = as_cppType(type.name) %}
{{as_cppType(method.return_type.name)}} {{CppType}}::{{method.name.CamelCase()}}(
{%- for arg in method.arguments -%}
@@ -89,9 +93,9 @@ namespace wgpu {
{%- endif -%}
{%- endfor -%}
) const
- {%- endmacro %}
+ {%- endmacro -%}
- {% macro render_cpp_to_c_method_call(type, method) -%}
+ {%- macro render_cpp_to_c_method_call(type, method) -%}
{{as_cMethod(type.name, method.name)}}(Get()
{%- for arg in method.arguments -%},{{" "}}
{%- if arg.annotation == "value" -%}
@@ -109,9 +113,9 @@ namespace wgpu {
{%- endif -%}
{%- endfor -%}
)
- {%- endmacro %}
+ {%- endmacro -%}
- {% for method in type.methods %}
+ {% for method in type.methods -%}
{{render_cpp_method_declaration(type, method)}} {
{% if method.return_type.name.concatcase() == "void" %}
{{render_cpp_to_c_method_call(type, method)}};
@@ -131,9 +135,10 @@ namespace wgpu {
{{as_cMethod(type.name, Name("release"))}}(handle);
}
}
-
{% endfor %}
+ // Instance
+
Instance CreateInstance(const InstanceDescriptor* descriptor) {
const WGPUInstanceDescriptor* cDescriptor =
reinterpret_cast<const WGPUInstanceDescriptor*>(descriptor);
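The static_asserts this template emits guard the reinterpret_casts the C++ wrapper relies on; a hand-written analogue for one hypothetical struct pair (the real file generates these from dawn.json):

```cpp
#include <cstddef>
#include <cstdint>

// C-side struct as the header would declare it (illustrative).
struct WGPUExtent3D {
    uint32_t width;
    uint32_t height;
    uint32_t depth;
};

namespace wgpu {

    // C++-side wrapper; must be layout-compatible with the C struct so that a
    // reinterpret_cast between the two is safe.
    struct Extent3D {
        uint32_t width;
        uint32_t height;
        uint32_t depth;
    };

    static_assert(sizeof(Extent3D) == sizeof(WGPUExtent3D), "sizeof mismatch for Extent3D");
    static_assert(alignof(Extent3D) == alignof(WGPUExtent3D), "alignof mismatch for Extent3D");
    static_assert(offsetof(Extent3D, width) == offsetof(WGPUExtent3D, width),
                  "offsetof mismatch for Extent3D::width");

}  // namespace wgpu
```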
diff --git a/chromium/third_party/dawn/generator/templates/webgpu_cpp.h b/chromium/third_party/dawn/generator/templates/webgpu_cpp.h
index 3dde1e803e3..d02bf986ace 100644
--- a/chromium/third_party/dawn/generator/templates/webgpu_cpp.h
+++ b/chromium/third_party/dawn/generator/templates/webgpu_cpp.h
@@ -11,7 +11,6 @@
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
-
#ifndef WEBGPU_CPP_H_
#define WEBGPU_CPP_H_
@@ -27,6 +26,10 @@ namespace wgpu {
{% for value in type.values %}
{{as_cppEnum(value.name)}} = 0x{{format(value.value, "08X")}},
{% endfor %}
+            //* TODO(dawn:22) remove this once the PSA is sent and the deadline has passed.
+ {% if type.name.canonical_case() == "texture format" %}
+ RG11B10Float = RG11B10Ufloat,
+ {% endif %}
};
{% endfor %}
diff --git a/chromium/third_party/dawn/generator/templates/webgpu_struct_info.json b/chromium/third_party/dawn/generator/templates/webgpu_struct_info.json
index 5120ba8d5aa..c4b40001ccd 100644
--- a/chromium/third_party/dawn/generator/templates/webgpu_struct_info.json
+++ b/chromium/third_party/dawn/generator/templates/webgpu_struct_info.json
@@ -23,14 +23,13 @@
"defines": [],
"structs": {
"WGPUChainedStruct": [
- "nextInChain",
+ "next",
"sType"
],
{% for type in by_category["structure"] if type.javascript %}
"{{as_cType(type.name)}}": [
{% if type.chained %}
- "nextInChain",
- "sType"
+ "chain"
{%- elif type.extensible %}
"nextInChain"
{%- endif %}
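The member names recorded in this JSON follow the struct-chaining convention: extensible structs carry a `nextInChain` pointer, and each chained extension embeds a `chain` struct as its first member. A sketch with hypothetical types (only WGPUChainedStruct matches the headers above):

```cpp
#include <cstdint>

typedef uint32_t WGPUSType;

struct WGPUChainedStruct {
    const WGPUChainedStruct* next;
    WGPUSType sType;
};

struct WGPUSomeDescriptor {  // hypothetical extensible struct
    const WGPUChainedStruct* nextInChain;
};

struct WGPUSomeDescriptorExtension {  // hypothetical chained extension
    WGPUChainedStruct chain;  // first member, so the extension can be passed
                              // around as a WGPUChainedStruct*
    uint32_t extraValue;
};

int main() {
    WGPUSomeDescriptorExtension ext = {};
    ext.chain.sType = 42;  // illustrative sType value
    WGPUSomeDescriptor desc = {};
    desc.nextInChain = &ext.chain;  // consumers walk the chain via `next`
}
```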
diff --git a/chromium/third_party/dawn/infra/config/PRESUBMIT.py b/chromium/third_party/dawn/infra/config/PRESUBMIT.py
index 8f08f24a495..de9b7eb0236 100644
--- a/chromium/third_party/dawn/infra/config/PRESUBMIT.py
+++ b/chromium/third_party/dawn/infra/config/PRESUBMIT.py
@@ -14,8 +14,10 @@
def CheckChangeOnUpload(input_api, output_api):
- return input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api)
+ return input_api.canned_checks.CheckChangedLUCIConfigs(
+ input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
- return input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api)
+ return input_api.canned_checks.CheckChangedLUCIConfigs(
+ input_api, output_api)
diff --git a/chromium/third_party/dawn/scripts/dawn_features.gni b/chromium/third_party/dawn/scripts/dawn_features.gni
index 1cee17450fa..9d3cbfcf250 100644
--- a/chromium/third_party/dawn/scripts/dawn_features.gni
+++ b/chromium/third_party/dawn/scripts/dawn_features.gni
@@ -69,9 +69,9 @@ declare_args() {
# Whether Dawn should enable X11 support.
dawn_use_x11 = is_linux && !is_chromeos
- # Enable support WGSL for shaders. Currently this is
- # experimental, and requires dawn_tint_dir to be set.
- dawn_enable_wgsl = false
+  # Enable support for WGSL shaders.
+ # Turned off for Skia, because Tint is currently not part of its DEPS.
+ dawn_enable_wgsl = !defined(is_skia_standalone)
}
# GN does not allow reading a variable defined in the same declare_args().
diff --git a/chromium/third_party/dawn/scripts/git-clang-format b/chromium/third_party/dawn/scripts/git-clang-format
deleted file mode 100755
index 60cd4fb25b6..00000000000
--- a/chromium/third_party/dawn/scripts/git-clang-format
+++ /dev/null
@@ -1,579 +0,0 @@
-#!/usr/bin/env python
-#
-#===- git-clang-format - ClangFormat Git Integration ---------*- python -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-r"""
-clang-format git integration
-============================
-
-This file provides a clang-format integration for git. Put it somewhere in your
-path and ensure that it is executable. Then, "git clang-format" will invoke
-clang-format on the changes in current files or a specific commit.
-
-For further details, run:
-git clang-format -h
-
-Requires Python 2.7 or Python 3
-"""
-
-from __future__ import print_function
-import argparse
-import collections
-import contextlib
-import errno
-import os
-import re
-import subprocess
-import sys
-
-usage = 'git clang-format [OPTIONS] [<commit>] [<commit>] [--] [<file>...]'
-
-desc = '''
-If zero or one commits are given, run clang-format on all lines that differ
-between the working directory and <commit>, which defaults to HEAD. Changes are
-only applied to the working directory.
-
-If two commits are given (requires --diff), run clang-format on all lines in the
-second <commit> that differ from the first <commit>.
-
-The following git-config settings set the default of the corresponding option:
- clangFormat.binary
- clangFormat.commit
- clangFormat.extension
- clangFormat.style
-'''
-
-# Name of the temporary index file in which save the output of clang-format.
-# This file is created within the .git directory.
-temp_index_basename = 'clang-format-index'
-
-
-Range = collections.namedtuple('Range', 'start, count')
-
-
-def main():
- config = load_git_config()
-
- # In order to keep '--' yet allow options after positionals, we need to
- # check for '--' ourselves. (Setting nargs='*' throws away the '--', while
- # nargs=argparse.REMAINDER disallows options after positionals.)
- argv = sys.argv[1:]
- try:
- idx = argv.index('--')
- except ValueError:
- dash_dash = []
- else:
- dash_dash = argv[idx:]
- argv = argv[:idx]
-
- default_extensions = ','.join([
- # From clang/lib/Frontend/FrontendOptions.cpp, all lower case
- 'c', 'h', # C
- 'm', # ObjC
- 'mm', # ObjC++
- 'cc', 'cp', 'cpp', 'c++', 'cxx', 'hpp', # C++
- # Other languages that clang-format supports
- 'proto', 'protodevel', # Protocol Buffers
- 'java', # Java
- 'js', # JavaScript
- 'ts', # TypeScript
- ])
-
- p = argparse.ArgumentParser(
- usage=usage, formatter_class=argparse.RawDescriptionHelpFormatter,
- description=desc)
- p.add_argument('--binary',
- default=config.get('clangformat.binary', 'clang-format'),
- help='path to clang-format'),
- p.add_argument('--commit',
- default=config.get('clangformat.commit', 'HEAD'),
- help='default commit to use if none is specified'),
- p.add_argument('--diff', action='store_true',
- help='print a diff instead of applying the changes')
- p.add_argument('--extensions',
- default=config.get('clangformat.extensions',
- default_extensions),
- help=('comma-separated list of file extensions to format, '
-                         'excluding the period; case-insensitive')),
- p.add_argument('-f', '--force', action='store_true',
- help='allow changes to unstaged files')
- p.add_argument('-p', '--patch', action='store_true',
- help='select hunks interactively')
- p.add_argument('-q', '--quiet', action='count', default=0,
- help='print less information')
- p.add_argument('--style',
- default=config.get('clangformat.style', None),
- help='passed to clang-format'),
- p.add_argument('-v', '--verbose', action='count', default=0,
- help='print extra information')
- # We gather all the remaining positional arguments into 'args' since we need
- # to use some heuristics to determine whether or not <commit> was present.
- # However, to print pretty messages, we make use of metavar and help.
- p.add_argument('args', nargs='*', metavar='<commit>',
- help='revision from which to compute the diff')
- p.add_argument('ignored', nargs='*', metavar='<file>...',
- help='if specified, only consider differences in these files')
- opts = p.parse_args(argv)
-
- opts.verbose -= opts.quiet
- del opts.quiet
-
- commits, files = interpret_args(opts.args, dash_dash, opts.commit)
- if len(commits) > 1:
- if not opts.diff:
- die('--diff is required when two commits are given')
- else:
- if len(commits) > 2:
- die('at most two commits allowed; %d given' % len(commits))
- changed_lines = compute_diff_and_extract_lines(commits, files)
- if opts.verbose >= 1:
- ignored_files = set(changed_lines)
- filter_by_extension(changed_lines, opts.extensions.lower().split(','))
- if opts.verbose >= 1:
- ignored_files.difference_update(changed_lines)
- if ignored_files:
- print('Ignoring changes in the following files (wrong extension):')
- for filename in ignored_files:
- print(' %s' % filename)
- if changed_lines:
- print('Running clang-format on the following files:')
- for filename in changed_lines:
- print(' %s' % filename)
- if not changed_lines:
- print('no modified files to format')
- return
- # The computed diff outputs absolute paths, so we must cd before accessing
- # those files.
- cd_to_toplevel()
- if len(commits) > 1:
- old_tree = commits[1]
- new_tree = run_clang_format_and_save_to_tree(changed_lines,
- revision=commits[1],
- binary=opts.binary,
- style=opts.style)
- else:
- old_tree = create_tree_from_workdir(changed_lines)
- new_tree = run_clang_format_and_save_to_tree(changed_lines,
- binary=opts.binary,
- style=opts.style)
- if opts.verbose >= 1:
- print('old tree: %s' % old_tree)
- print('new tree: %s' % new_tree)
- if old_tree == new_tree:
- if opts.verbose >= 0:
- print('clang-format did not modify any files')
- elif opts.diff:
- print_diff(old_tree, new_tree)
- else:
- changed_files = apply_changes(old_tree, new_tree, force=opts.force,
- patch_mode=opts.patch)
- if (opts.verbose >= 0 and not opts.patch) or opts.verbose >= 1:
- print('changed files:')
- for filename in changed_files:
- print(' %s' % filename)
-
-
-def load_git_config(non_string_options=None):
- """Return the git configuration as a dictionary.
-
-  All options are assumed to be strings unless listed in `non_string_options`,
-  which is a dictionary mapping option name (in lower case) to either "--bool"
-  or "--int"."""
- if non_string_options is None:
- non_string_options = {}
- out = {}
- for entry in run('git', 'config', '--list', '--null').split('\0'):
- if entry:
- name, value = entry.split('\n', 1)
- if name in non_string_options:
- value = run('git', 'config', non_string_options[name], name)
- out[name] = value
- return out
-
-
-def interpret_args(args, dash_dash, default_commit):
- """Interpret `args` as "[commits] [--] [files]" and return (commits, files).
-
- It is assumed that "--" and everything that follows has been removed from
- args and placed in `dash_dash`.
-
- If "--" is present (i.e., `dash_dash` is non-empty), the arguments to its
- left (if present) are taken as commits. Otherwise, the arguments are checked
- from left to right if they are commits or files. If commits are not given,
- a list with `default_commit` is used."""
- if dash_dash:
- if len(args) == 0:
- commits = [default_commit]
- else:
- commits = args
- for commit in commits:
- object_type = get_object_type(commit)
- if object_type not in ('commit', 'tag'):
- if object_type is None:
- die("'%s' is not a commit" % commit)
- else:
- die("'%s' is a %s, but a commit was expected" % (commit, object_type))
- files = dash_dash[1:]
- elif args:
- commits = []
- while args:
- if not disambiguate_revision(args[0]):
- break
- commits.append(args.pop(0))
- if not commits:
- commits = [default_commit]
- files = args
- else:
- commits = [default_commit]
- files = []
- return commits, files
-
-
-def disambiguate_revision(value):
- """Returns True if `value` is a revision, False if it is a file, or dies."""
- # If `value` is ambiguous (neither a commit nor a file), the following
- # command will die with an appropriate error message.
- run('git', 'rev-parse', value, verbose=False)
- object_type = get_object_type(value)
- if object_type is None:
- return False
- if object_type in ('commit', 'tag'):
- return True
- die('`%s` is a %s, but a commit or filename was expected' %
- (value, object_type))
-
-
-def get_object_type(value):
- """Returns a string description of an object's type, or None if it is not
- a valid git object."""
- cmd = ['git', 'cat-file', '-t', value]
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdout, stderr = p.communicate()
- if p.returncode != 0:
- return None
- return convert_string(stdout.strip())
-
-
-def compute_diff_and_extract_lines(commits, files):
- """Calls compute_diff() followed by extract_lines()."""
- diff_process = compute_diff(commits, files)
- changed_lines = extract_lines(diff_process.stdout)
- diff_process.stdout.close()
- diff_process.wait()
- if diff_process.returncode != 0:
- # Assume error was already printed to stderr.
- sys.exit(2)
- return changed_lines
-
-
-def compute_diff(commits, files):
- """Return a subprocess object producing the diff from `commits`.
-
-  The return value's `stdout` file object will produce a patch with the
- differences between the working directory and the first commit if a single
- one was specified, or the difference between both specified commits, filtered
- on `files` (if non-empty). Zero context lines are used in the patch."""
- git_tool = 'diff-index'
- if len(commits) > 1:
- git_tool = 'diff-tree'
- cmd = ['git', git_tool, '-p', '-U0'] + commits + ['--']
- cmd.extend(files)
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
- p.stdin.close()
- return p
-
-
-def extract_lines(patch_file):
- """Extract the changed lines in `patch_file`.
-
-  The input must have been produced with ``-U0``, meaning unidiff format with
-  zero lines of context. The return value is a dict mapping filename to a
-  list of line `Range`s, i.e. (start_line, line_count) pairs."""
- matches = {}
- for line in patch_file:
- line = convert_string(line)
- match = re.search(r'^\+\+\+\ [^/]+/(.*)', line)
- if match:
- filename = match.group(1).rstrip('\r\n')
- match = re.search(r'^@@ -[0-9,]+ \+(\d+)(,(\d+))?', line)
- if match:
- start_line = int(match.group(1))
- line_count = 1
- if match.group(3):
- line_count = int(match.group(3))
- if line_count > 0:
- matches.setdefault(filename, []).append(Range(start_line, line_count))
- return matches
-
-
-def filter_by_extension(dictionary, allowed_extensions):
- """Delete every key in `dictionary` that doesn't have an allowed extension.
-
- `allowed_extensions` must be a collection of lowercase file extensions,
- excluding the period."""
- allowed_extensions = frozenset(allowed_extensions)
- for filename in list(dictionary.keys()):
- base_ext = filename.rsplit('.', 1)
- if len(base_ext) == 1 and '' in allowed_extensions:
- continue
- if len(base_ext) == 1 or base_ext[1].lower() not in allowed_extensions:
- del dictionary[filename]
-
-
-def cd_to_toplevel():
- """Change to the top level of the git repository."""
- toplevel = run('git', 'rev-parse', '--show-toplevel')
- os.chdir(toplevel)
-
-
-def create_tree_from_workdir(filenames):
- """Create a new git tree with the given files from the working directory.
-
- Returns the object ID (SHA-1) of the created tree."""
- return create_tree(filenames, '--stdin')
-
-
-def run_clang_format_and_save_to_tree(changed_lines, revision=None,
- binary='clang-format', style=None):
- """Run clang-format on each file and save the result to a git tree.
-
- Returns the object ID (SHA-1) of the created tree."""
- def iteritems(container):
- try:
- return container.iteritems() # Python 2
- except AttributeError:
- return container.items() # Python 3
- def index_info_generator():
- for filename, line_ranges in iteritems(changed_lines):
- if revision:
- git_metadata_cmd = ['git', 'ls-tree',
- '%s:%s' % (revision, os.path.dirname(filename)),
- os.path.basename(filename)]
- git_metadata = subprocess.Popen(git_metadata_cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE)
- stdout = git_metadata.communicate()[0]
- mode = oct(int(stdout.split()[0], 8))
- else:
- mode = oct(os.stat(filename).st_mode)
- # Adjust python3 octal format so that it matches what git expects
- if mode.startswith('0o'):
- mode = '0' + mode[2:]
- blob_id = clang_format_to_blob(filename, line_ranges,
- revision=revision,
- binary=binary,
- style=style)
- yield '%s %s\t%s' % (mode, blob_id, filename)
- return create_tree(index_info_generator(), '--index-info')
-
-
-def create_tree(input_lines, mode):
- """Create a tree object from the given input.
-
- If mode is '--stdin', it must be a list of filenames. If mode is
-  '--index-info', it must be a list of values suitable for "git update-index
- --index-info", such as "<mode> <SP> <sha1> <TAB> <filename>". Any other mode
- is invalid."""
- assert mode in ('--stdin', '--index-info')
- cmd = ['git', 'update-index', '--add', '-z', mode]
- with temporary_index_file():
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
- for line in input_lines:
- p.stdin.write(to_bytes('%s\0' % line))
- p.stdin.close()
- if p.wait() != 0:
- die('`%s` failed' % ' '.join(cmd))
- tree_id = run('git', 'write-tree')
- return tree_id
-
-
-def clang_format_to_blob(filename, line_ranges, revision=None,
- binary='clang-format', style=None):
- """Run clang-format on the given file and save the result to a git blob.
-
- Runs on the file in `revision` if not None, or on the file in the working
- directory if `revision` is None.
-
- Returns the object ID (SHA-1) of the created blob."""
- clang_format_cmd = [binary]
- if style:
- clang_format_cmd.extend(['-style='+style])
- clang_format_cmd.extend([
- '-lines=%s:%s' % (start_line, start_line+line_count-1)
- for start_line, line_count in line_ranges])
- if revision:
- clang_format_cmd.extend(['-assume-filename='+filename])
- git_show_cmd = ['git', 'cat-file', 'blob', '%s:%s' % (revision, filename)]
- git_show = subprocess.Popen(git_show_cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE)
- git_show.stdin.close()
- clang_format_stdin = git_show.stdout
- else:
- clang_format_cmd.extend([filename])
- git_show = None
- clang_format_stdin = subprocess.PIPE
- try:
- clang_format = subprocess.Popen(clang_format_cmd, stdin=clang_format_stdin,
- stdout=subprocess.PIPE)
- if clang_format_stdin == subprocess.PIPE:
- clang_format_stdin = clang_format.stdin
- except OSError as e:
- if e.errno == errno.ENOENT:
- die('cannot find executable "%s"' % binary)
- else:
- raise
- clang_format_stdin.close()
- hash_object_cmd = ['git', 'hash-object', '-w', '--path='+filename, '--stdin']
- hash_object = subprocess.Popen(hash_object_cmd, stdin=clang_format.stdout,
- stdout=subprocess.PIPE)
- clang_format.stdout.close()
- stdout = hash_object.communicate()[0]
- if hash_object.returncode != 0:
- die('`%s` failed' % ' '.join(hash_object_cmd))
- if clang_format.wait() != 0:
- die('`%s` failed' % ' '.join(clang_format_cmd))
- if git_show and git_show.wait() != 0:
- die('`%s` failed' % ' '.join(git_show_cmd))
- return convert_string(stdout).rstrip('\r\n')
-
-
-@contextlib.contextmanager
-def temporary_index_file(tree=None):
- """Context manager for setting GIT_INDEX_FILE to a temporary file and deleting
- the file afterward."""
- index_path = create_temporary_index(tree)
- old_index_path = os.environ.get('GIT_INDEX_FILE')
- os.environ['GIT_INDEX_FILE'] = index_path
- try:
- yield
- finally:
- if old_index_path is None:
- del os.environ['GIT_INDEX_FILE']
- else:
- os.environ['GIT_INDEX_FILE'] = old_index_path
- os.remove(index_path)
-
-
-def create_temporary_index(tree=None):
- """Create a temporary index file and return the created file's path.
-
- If `tree` is not None, use that as the tree to read in. Otherwise, an
- empty index is created."""
- gitdir = run('git', 'rev-parse', '--git-dir')
- path = os.path.join(gitdir, temp_index_basename)
- if tree is None:
- tree = '--empty'
- run('git', 'read-tree', '--index-output='+path, tree)
- return path
-
-
-def print_diff(old_tree, new_tree):
- """Print the diff between the two trees to stdout."""
- # We use the porcelain 'diff' and not plumbing 'diff-tree' because the output
- # is expected to be viewed by the user, and only the former does nice things
- # like color and pagination.
- #
- # We also only print modified files since `new_tree` only contains the files
- # that were modified, so unmodified files would show as deleted without the
- # filter.
- subprocess.check_call(['git', 'diff', '--diff-filter=M', old_tree, new_tree,
- '--'])
-
-
-def apply_changes(old_tree, new_tree, force=False, patch_mode=False):
- """Apply the changes in `new_tree` to the working directory.
-
- Bails if there are local changes in those files and not `force`. If
- `patch_mode`, runs `git checkout --patch` to select hunks interactively."""
- changed_files = run('git', 'diff-tree', '--diff-filter=M', '-r', '-z',
- '--name-only', old_tree,
- new_tree).rstrip('\0').split('\0')
- if not force:
- unstaged_files = run('git', 'diff-files', '--name-status', *changed_files)
- if unstaged_files:
- print('The following files would be modified but '
- 'have unstaged changes:', file=sys.stderr)
- print(unstaged_files, file=sys.stderr)
- print('Please commit, stage, or stash them first.', file=sys.stderr)
- sys.exit(2)
- if patch_mode:
- # In patch mode, we could just as well create an index from the new tree
- # and checkout from that, but then the user will be presented with a
- # message saying "Discard ... from worktree". Instead, we use the old
- # tree as the index and checkout from new_tree, which gives the slightly
- # better message, "Apply ... to index and worktree". This is not quite
- # right, since it won't be applied to the user's index, but oh well.
- with temporary_index_file(old_tree):
- subprocess.check_call(['git', 'checkout', '--patch', new_tree])
- index_tree = old_tree
- else:
- with temporary_index_file(new_tree):
- run('git', 'checkout-index', '-a', '-f')
- return changed_files
-
-
-def run(*args, **kwargs):
- stdin = kwargs.pop('stdin', '')
- verbose = kwargs.pop('verbose', True)
- strip = kwargs.pop('strip', True)
- for name in kwargs:
- raise TypeError("run() got an unexpected keyword argument '%s'" % name)
- p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- stdin=subprocess.PIPE)
- stdout, stderr = p.communicate(input=stdin)
-
- stdout = convert_string(stdout)
- stderr = convert_string(stderr)
-
- if p.returncode == 0:
- if stderr:
- if verbose:
- print('`%s` printed to stderr:' % ' '.join(args), file=sys.stderr)
- print(stderr.rstrip(), file=sys.stderr)
- if strip:
- stdout = stdout.rstrip('\r\n')
- return stdout
- if verbose:
- print('`%s` returned %s' % (' '.join(args), p.returncode), file=sys.stderr)
- if stderr:
- print(stderr.rstrip(), file=sys.stderr)
- sys.exit(2)
-
-
-def die(message):
- print('error:', message, file=sys.stderr)
- sys.exit(2)
-
-
-def to_bytes(str_input):
- # Encode to UTF-8 to get binary data.
- if isinstance(str_input, bytes):
- return str_input
- return str_input.encode('utf-8')
-
-
-def to_string(bytes_input):
- if isinstance(bytes_input, str):
- return bytes_input
-  return bytes_input.decode('utf-8')
-
-
-def convert_string(bytes_input):
- try:
- return to_string(bytes_input.decode('utf-8'))
- except AttributeError: # 'str' object has no attribute 'decode'.
- return str(bytes_input)
- except UnicodeError:
- return str(bytes_input)
-
-if __name__ == '__main__':
- main()
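For context on the deleted tool: its central step parses diff output produced with -U0 into per-file line ranges, which are then handed to clang-format via -lines. A rough C++ transcription of that parsing step, for illustration only:

```cpp
#include <istream>
#include <map>
#include <regex>
#include <string>
#include <vector>

struct Range {
    int start;
    int count;
};

// Parse unidiff text produced with -U0 (zero context lines) and collect the
// changed (start_line, line_count) ranges for each file, mirroring the
// deleted script's extract_lines().
std::map<std::string, std::vector<Range>> ExtractLines(std::istream& patch) {
    std::map<std::string, std::vector<Range>> matches;
    const std::regex fileRe(R"(^\+\+\+ [^/]+/(.*))");
    const std::regex hunkRe(R"(^@@ -[0-9,]+ \+(\d+)(,(\d+))?)");
    std::string line;
    std::string filename;
    while (std::getline(patch, line)) {
        std::smatch m;
        if (std::regex_search(line, m, fileRe)) {
            filename = m[1];
        } else if (std::regex_search(line, m, hunkRe)) {
            int start = std::stoi(m[1]);
            int count = m[3].matched ? std::stoi(m[3]) : 1;
            if (count > 0) {
                matches[filename].push_back({start, count});
            }
        }
    }
    return matches;
}
```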
diff --git a/chromium/third_party/dawn/scripts/lint_clang_format.sh b/chromium/third_party/dawn/scripts/lint_clang_format.sh
deleted file mode 100755
index 82b572c23cd..00000000000
--- a/chromium/third_party/dawn/scripts/lint_clang_format.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The Dawn Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-clang_format=$1
-base_commit=$2
-
-echo
-skipped_directories="(examples|generator|src/tests/(unittests|end2end)|third_party)"
-# Find the files modified that need formatting
-files_to_check=$(git diff --diff-filter=ACMR --name-only $base_commit | grep -E "\.(c|cpp|mm|h)$" | grep -vE "^$skipped_directories/*")
-if [ -z "$files_to_check" ]; then
- echo "No modified files to format."
- exit 0
-fi
-echo "Checking formatting diff on these files:"
-echo "$files_to_check"
-echo
-files_to_check=$(echo $files_to_check | tr '\n' ' ')
-
-# Run git-clang-format, check if it formatted anything
-format_output=$(scripts/git-clang-format --binary $clang_format --commit $base_commit --diff --style=file $files_to_check)
-if [ "$format_output" == "clang-format did not modify any files" ] || [ "$format_output" == "no modified files to format" ] ; then
- exit 0
-fi
-
-# clang-format made changes, print them and fail Travis
-echo "Following formatting changes needed:"
-echo
-echo "$format_output"
-echo
-exit 1
diff --git a/chromium/third_party/dawn/scripts/perf_test_runner.py b/chromium/third_party/dawn/scripts/perf_test_runner.py
index c4cc55cf818..157d4497a14 100755
--- a/chromium/third_party/dawn/scripts/perf_test_runner.py
+++ b/chromium/third_party/dawn/scripts/perf_test_runner.py
@@ -22,7 +22,8 @@ import sys
import os
import re
-base_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
+base_path = os.path.abspath(
+ os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
# Look for a [Rr]elease build.
perftests_paths = glob.glob('out/*elease*')
@@ -35,6 +36,7 @@ if sys.platform == 'win32':
scores = []
+
def mean(data):
"""Return the sample arithmetic mean of data."""
n = len(data)
@@ -102,6 +104,7 @@ if len(sys.argv) >= 2:
print('Using test executable: ' + perftests_path)
print('Test name: ' + test_name)
+
def get_results(metric, extra_args=[]):
process = subprocess.Popen(
[perftests_path, '--gtest_filter=' + test_name] + extra_args,
@@ -139,11 +142,14 @@ for experiment in range(max_experiments):
if (len(scores) > 1):
sys.stdout.write(", mean: %.2f" % mean(scores))
- sys.stdout.write(", variation: %.2f%%" % (coefficient_of_variation(scores) * 100.0))
+ sys.stdout.write(", variation: %.2f%%" %
+ (coefficient_of_variation(scores) * 100.0))
if (len(scores) > 7):
truncation_n = len(scores) >> 3
- sys.stdout.write(", truncated mean: %.2f" % truncated_mean(scores, truncation_n))
- sys.stdout.write(", variation: %.2f%%" % (truncated_cov(scores, truncation_n) * 100.0))
+ sys.stdout.write(", truncated mean: %.2f" %
+ truncated_mean(scores, truncation_n))
+ sys.stdout.write(", variation: %.2f%%" %
+ (truncated_cov(scores, truncation_n) * 100.0))
print("")
diff --git a/chromium/third_party/dawn/scripts/roll-shader-deps.sh b/chromium/third_party/dawn/scripts/roll-shader-deps.sh
index ac6db480ac0..db16800867b 100755
--- a/chromium/third_party/dawn/scripts/roll-shader-deps.sh
+++ b/chromium/third_party/dawn/scripts/roll-shader-deps.sh
@@ -26,7 +26,7 @@ spirv_cross_dir="third_party/spirv-cross/"
spirv_cross_trunk="origin/master"
spirv_headers_dir="third_party/spirv-headers/"
spirv_headers_trunk="origin/master"
-spirv_tools_dir="third_party/spirv-tools/"
+spirv_tools_dir="third_party/SPIRV-Tools/"
spirv_tools_trunk="origin/master"
tint_dir="third_party/tint/"
tint_trunk="origin/main"
diff --git a/chromium/third_party/dawn/scripts/travis_lint_format.sh b/chromium/third_party/dawn/scripts/travis_lint_format.sh
deleted file mode 100755
index 3515d606f64..00000000000
--- a/chromium/third_party/dawn/scripts/travis_lint_format.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The Dawn Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if [ "$TRAVIS_PULL_REQUEST" == "false" ]; then
- echo "Running outside of pull request isn't supported yet"
- exit 0
-fi
-
-# Choose the commit against which to format
-base_commit=$(git rev-parse $TRAVIS_BRANCH)
-echo "Formatting against $TRAVIS_BRANCH a.k.a. $base_commit..."
-echo
-
-scripts/lint_clang_format.sh $1 $base_commit
diff --git a/chromium/third_party/dawn/src/common/BUILD.gn b/chromium/third_party/dawn/src/common/BUILD.gn
index 59a23057224..85d63d1ede7 100644
--- a/chromium/third_party/dawn/src/common/BUILD.gn
+++ b/chromium/third_party/dawn/src/common/BUILD.gn
@@ -84,8 +84,9 @@ config("dawn_internal") {
}
# Only internal Dawn targets can use this config, this means only targets in
- # this BUILD.gn file.
- visibility = [ ":*" ]
+ # this BUILD.gn file and related subdirs.
+ visibility = [ "../*" ]
+
cflags = []
# Enable more warnings that were found when using Dawn in other projects
@@ -108,6 +109,12 @@ config("dawn_internal") {
"-Wtautological-unsigned-zero-compare",
]
+ # Allow comparison against type limits that might be tautological on 32bit
+ # or 64bit systems. Without this the following produces an error on 64bit:
+ #
+ # if (myUint64 > std::numeric_limits<size_t>::max()) {...}
+ cflags += [ "-Wno-tautological-type-limit-compare" ]
+
if (is_win) {
cflags += [
# clang-cl doesn't know -pedantic, pass it explicitly to the clang driver
@@ -165,13 +172,17 @@ if (is_win || is_linux || is_mac || is_fuchsia || is_android) {
"SerialStorage.h",
"SlabAllocator.cpp",
"SlabAllocator.h",
+ "StackContainer.h",
"SwapChainUtils.h",
"SystemUtils.cpp",
"SystemUtils.h",
"TypedInteger.h",
+ "UnderlyingType.h",
"ityp_array.h",
"ityp_bitset.h",
"ityp_span.h",
+ "ityp_stack_vec.h",
+ "ityp_vector.h",
"vulkan_platform.h",
"windows_with_undefs.h",
"xlib_with_undefs.h",
diff --git a/chromium/third_party/dawn/src/common/BitSetIterator.h b/chromium/third_party/dawn/src/common/BitSetIterator.h
index d35bc8a2df0..14ba4856ff7 100644
--- a/chromium/third_party/dawn/src/common/BitSetIterator.h
+++ b/chromium/third_party/dawn/src/common/BitSetIterator.h
@@ -48,7 +48,7 @@ class BitSetIterator final {
T operator*() const {
using U = UnderlyingType<T>;
- ASSERT(mCurrentBit <= std::numeric_limits<U>::max());
+ ASSERT(static_cast<U>(mCurrentBit) <= std::numeric_limits<U>::max());
return static_cast<T>(static_cast<U>(mCurrentBit));
}
diff --git a/chromium/third_party/dawn/src/common/CMakeLists.txt b/chromium/third_party/dawn/src/common/CMakeLists.txt
index 1ab20234b1f..a6f320e33fe 100644
--- a/chromium/third_party/dawn/src/common/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/common/CMakeLists.txt
@@ -41,6 +41,7 @@ target_sources(dawn_common PRIVATE
"SerialStorage.h"
"SlabAllocator.cpp"
"SlabAllocator.h"
+ "StackContainer.h"
"SwapChainUtils.h"
"SystemUtils.cpp"
"SystemUtils.h"
@@ -49,6 +50,8 @@ target_sources(dawn_common PRIVATE
"ityp_array.h"
"ityp_bitset.h"
"ityp_span.h"
+ "ityp_stack_vec.h"
+ "ityp_vector.h"
"vulkan_platform.h"
"windows_with_undefs.h"
"xlib_with_undefs.h"
diff --git a/chromium/third_party/dawn/src/common/Constants.h b/chromium/third_party/dawn/src/common/Constants.h
index df792cd5d6d..e281646bffa 100644
--- a/chromium/third_party/dawn/src/common/Constants.h
+++ b/chromium/third_party/dawn/src/common/Constants.h
@@ -18,8 +18,6 @@
#include <cstdint>
static constexpr uint32_t kMaxBindGroups = 4u;
-// TODO(cwallez@chromium.org): investigate bindgroup limits
-static constexpr uint32_t kMaxBindingsPerGroup = 16u;
static constexpr uint32_t kMaxVertexAttributes = 16u;
// Vulkan has a standalone limit named maxVertexInputAttributeOffset (2047u at least) for vertex
// attribute offset. The limit might be meaningless because Vulkan has another limit named
@@ -35,13 +33,21 @@ static constexpr uint32_t kMaxColorAttachments = 4u;
static constexpr uint32_t kTextureBytesPerRowAlignment = 256u;
// Dynamic buffer offsets require offset to be divisible by 256
static constexpr uint64_t kMinDynamicBufferOffsetAlignment = 256u;
-// Max numbers of dynamic uniform buffers
-static constexpr uint32_t kMaxDynamicUniformBufferCount = 8u;
-// Max numbers of dynamic storage buffers
-static constexpr uint32_t kMaxDynamicStorageBufferCount = 4u;
-// Max numbers of dynamic buffers
-static constexpr uint32_t kMaxDynamicBufferCount =
- kMaxDynamicUniformBufferCount + kMaxDynamicStorageBufferCount;
+
+// Per stage limits
+static constexpr uint32_t kMaxSampledTexturesPerShaderStage = 16;
+static constexpr uint32_t kMaxSamplersPerShaderStage = 16;
+static constexpr uint32_t kMaxStorageBuffersPerShaderStage = 6;
+static constexpr uint32_t kMaxStorageTexturesPerShaderStage = 4;
+static constexpr uint32_t kMaxUniformBuffersPerShaderStage = 12;
+
+// Per pipeline layout limits
+static constexpr uint32_t kMaxDynamicUniformBuffersPerPipelineLayout = 8u;
+static constexpr uint32_t kMaxDynamicStorageBuffersPerPipelineLayout = 4u;
+
+// Max size of uniform buffer binding
+static constexpr uint64_t kMaxUniformBufferBindingSize = 16384u;
+
// Indirect command sizes
static constexpr uint64_t kDispatchIndirectSize = 3 * sizeof(uint32_t);
static constexpr uint64_t kDrawIndirectSize = 4 * sizeof(uint32_t);
@@ -58,4 +64,8 @@ static constexpr uint32_t kMaxTexture2DMipLevels = 14u;
static_assert(1 << (kMaxTexture2DMipLevels - 1) == kMaxTextureSize,
"kMaxTexture2DMipLevels and kMaxTextureSize size mismatch");
+// Offset alignment for CopyB2B. Strictly speaking this alignment is required only
+// on macOS, but we decide to do it on all platforms.
+static constexpr uint64_t kCopyBufferToBufferOffsetAlignment = 4u;
+
#endif // COMMON_CONSTANTS_H_
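Example checks one might write against the new constants (ValidateCopyOffset and ValidateUniformBindingSize are hypothetical helpers; RoundUp mirrors common/Math.cpp):

```cpp
#include <cstdint>

static constexpr uint64_t kCopyBufferToBufferOffsetAlignment = 4u;
static constexpr uint64_t kMaxUniformBufferBindingSize = 16384u;

// Same arithmetic as the RoundUp helper in common/Math.cpp, made constexpr
// here so it can be exercised in a static_assert.
constexpr uint64_t RoundUp(uint64_t n, uint64_t m) {
    return ((n + m - 1) / m) * m;
}

bool ValidateCopyOffset(uint64_t offset) {
    return offset % kCopyBufferToBufferOffsetAlignment == 0;
}

bool ValidateUniformBindingSize(uint64_t size) {
    return size <= kMaxUniformBufferBindingSize;
}

static_assert(RoundUp(5, 4) == 8, "5 rounds up to the next multiple of 4");
```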
diff --git a/chromium/third_party/dawn/src/common/GPUInfo.cpp b/chromium/third_party/dawn/src/common/GPUInfo.cpp
index 60076d2830b..c3ea9cefb5c 100644
--- a/chromium/third_party/dawn/src/common/GPUInfo.cpp
+++ b/chromium/third_party/dawn/src/common/GPUInfo.cpp
@@ -36,4 +36,7 @@ namespace gpu_info {
bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId) {
return vendorId == kVendorID_Google && deviceId == kDeviceID_Swiftshader;
}
+ bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId) {
+ return vendorId == kVendorID_Microsoft && deviceId == kDeviceID_WARP;
+ }
} // namespace gpu_info
diff --git a/chromium/third_party/dawn/src/common/GPUInfo.h b/chromium/third_party/dawn/src/common/GPUInfo.h
index d16c89b9b99..87efbbc9dfb 100644
--- a/chromium/third_party/dawn/src/common/GPUInfo.h
+++ b/chromium/third_party/dawn/src/common/GPUInfo.h
@@ -29,8 +29,10 @@ namespace gpu_info {
static constexpr PCIVendorID kVendorID_Nvidia = 0x10DE;
static constexpr PCIVendorID kVendorID_Qualcomm = 0x5143;
static constexpr PCIVendorID kVendorID_Google = 0x1AE0;
+ static constexpr PCIVendorID kVendorID_Microsoft = 0x1414;
static constexpr PCIDeviceID kDeviceID_Swiftshader = 0xC0DE;
+ static constexpr PCIDeviceID kDeviceID_WARP = 0x8c;
bool IsAMD(PCIVendorID vendorId);
bool IsARM(PCIVendorID vendorId);
@@ -39,6 +41,7 @@ namespace gpu_info {
bool IsNvidia(PCIVendorID vendorId);
bool IsQualcomm(PCIVendorID vendorId);
bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId);
+ bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId);
} // namespace gpu_info
#endif // COMMON_GPUINFO_H
diff --git a/chromium/third_party/dawn/src/common/HashUtils.h b/chromium/third_party/dawn/src/common/HashUtils.h
index 9d10ca713c2..1c33a3f2c1c 100644
--- a/chromium/third_party/dawn/src/common/HashUtils.h
+++ b/chromium/third_party/dawn/src/common/HashUtils.h
@@ -90,7 +90,7 @@ size_t Hash(const std::bitset<N>& value) {
namespace std {
template <typename Index, size_t N>
- class hash<ityp::bitset<Index, N>> {
+ struct hash<ityp::bitset<Index, N>> {
public:
size_t operator()(const ityp::bitset<Index, N>& value) const {
return Hash(static_cast<const std::bitset<N>&>(value));
diff --git a/chromium/third_party/dawn/src/common/LinkedList.h b/chromium/third_party/dawn/src/common/LinkedList.h
index 69fcf78ab29..47ca1eb1040 100644
--- a/chromium/third_party/dawn/src/common/LinkedList.h
+++ b/chromium/third_party/dawn/src/common/LinkedList.h
@@ -197,4 +197,4 @@ class LinkedList {
private:
LinkNode<T> root_;
};
-#endif  // COMMON_LINKED_LIST_H
\ No newline at end of file
+#endif // COMMON_LINKED_LIST_H
diff --git a/chromium/third_party/dawn/src/common/Math.cpp b/chromium/third_party/dawn/src/common/Math.cpp
index 62807556f55..8e5985f7e73 100644
--- a/chromium/third_party/dawn/src/common/Math.cpp
+++ b/chromium/third_party/dawn/src/common/Math.cpp
@@ -159,4 +159,4 @@ uint64_t RoundUp(uint64_t n, uint64_t m) {
ASSERT(n > 0);
ASSERT(m <= std::numeric_limits<uint64_t>::max() - n);
return ((n + m - 1) / m) * m;
-}
\ No newline at end of file
+}
diff --git a/chromium/third_party/dawn/src/common/Math.h b/chromium/third_party/dawn/src/common/Math.h
index db941f279a9..c673785e260 100644
--- a/chromium/third_party/dawn/src/common/Math.h
+++ b/chromium/third_party/dawn/src/common/Math.h
@@ -31,6 +31,22 @@ uint32_t Log2(uint64_t value);
bool IsPowerOfTwo(uint64_t n);
uint64_t RoundUp(uint64_t n, uint64_t m);
+constexpr uint32_t ConstexprLog2(uint64_t v) {
+ return v <= 1 ? 0 : 1 + ConstexprLog2(v / 2);
+}
+
+constexpr uint32_t ConstexprLog2Ceil(uint64_t v) {
+ return v <= 1 ? 0 : ConstexprLog2(v - 1) + 1;
+}
+
+inline uint32_t Log2Ceil(uint32_t v) {
+ return v <= 1 ? 0 : Log2(v - 1) + 1;
+}
+
+inline uint32_t Log2Ceil(uint64_t v) {
+ return v <= 1 ? 0 : Log2(v - 1) + 1;
+}
+
uint64_t NextPowerOfTwo(uint64_t n);
bool IsPtrAligned(const void* ptr, size_t alignment);
void* AlignVoidPtr(void* ptr, size_t alignment);
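Since the new helpers are constexpr, their floor/ceiling behavior can be pinned down at compile time; a few sanity checks (definitions repeated so the snippet stands alone):

```cpp
#include <cstdint>

constexpr uint32_t ConstexprLog2(uint64_t v) {
    return v <= 1 ? 0 : 1 + ConstexprLog2(v / 2);  // floor(log2(v))
}
constexpr uint32_t ConstexprLog2Ceil(uint64_t v) {
    return v <= 1 ? 0 : ConstexprLog2(v - 1) + 1;  // ceil(log2(v))
}

static_assert(ConstexprLog2(8) == 3, "log2 of an exact power of two");
static_assert(ConstexprLog2(9) == 3, "floor(log2(9)) == 3");
static_assert(ConstexprLog2Ceil(8) == 3, "ceiling is exact at powers of two");
static_assert(ConstexprLog2Ceil(9) == 4, "ceil(log2(9)) == 4");
```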
diff --git a/chromium/third_party/dawn/src/common/Platform.h b/chromium/third_party/dawn/src/common/Platform.h
index bc64db26d8f..af7b1751518 100644
--- a/chromium/third_party/dawn/src/common/Platform.h
+++ b/chromium/third_party/dawn/src/common/Platform.h
@@ -41,6 +41,10 @@
# define DAWN_PLATFORM_FUCHSIA 1
# define DAWN_PLATFORM_POSIX 1
+#elif defined(__EMSCRIPTEN__)
+# define DAWN_PLATFORM_EMSCRIPTEN 1
+# define DAWN_PLATFORM_POSIX 1
+
#else
# error "Unsupported platform."
#endif
@@ -60,7 +64,7 @@
# define DAWN_PLATFORM_64_BIT 1
static_assert(sizeof(sizeof(char)) == 8, "Expect sizeof(size_t) == 8");
#elif defined(_WIN32) || defined(__arm__) || defined(__i386__) || defined(__mips32__) || \
- defined(__s390__)
+ defined(__s390__) || defined(__EMSCRIPTEN__)
# define DAWN_PLATFORM_32_BIT 1
static_assert(sizeof(sizeof(char)) == 4, "Expect sizeof(size_t) == 4");
#else
diff --git a/chromium/third_party/dawn/src/common/RefCounted.cpp b/chromium/third_party/dawn/src/common/RefCounted.cpp
index bb0f76d8842..af38fc668e2 100644
--- a/chromium/third_party/dawn/src/common/RefCounted.cpp
+++ b/chromium/third_party/dawn/src/common/RefCounted.cpp
@@ -75,4 +75,4 @@ void RefCounted::Release() {
void RefCounted::DeleteThis() {
delete this;
-}
\ No newline at end of file
+}
diff --git a/chromium/third_party/dawn/src/common/RefCounted.h b/chromium/third_party/dawn/src/common/RefCounted.h
index b055d7b71e6..90144a6043b 100644
--- a/chromium/third_party/dawn/src/common/RefCounted.h
+++ b/chromium/third_party/dawn/src/common/RefCounted.h
@@ -122,6 +122,14 @@ class Ref {
mPointee = nullptr;
}
+ bool operator==(const T* other) const {
+ return mPointee == other;
+ }
+
+ bool operator!=(const T* other) const {
+ return mPointee != other;
+ }
+
operator bool() {
return mPointee != nullptr;
}
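Usage sketch for the new comparison operators, with a minimal stand-in Ref (the real class also manages the reference count, omitted here):

```cpp
template <typename T>
class Ref {
  public:
    explicit Ref(T* p) : mPointee(p) {}

    bool operator==(const T* other) const {
        return mPointee == other;
    }
    bool operator!=(const T* other) const {
        return mPointee != other;
    }

  private:
    T* mPointee;
};

struct Thing {};

bool Example() {
    Thing thing;
    Ref<Thing> ref(&thing);
    // A Ref can now be compared directly against a raw pointer (including
    // nullptr) without an explicit Get() call.
    return ref == &thing && ref != nullptr;
}
```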
diff --git a/chromium/third_party/dawn/src/common/SerialStorage.h b/chromium/third_party/dawn/src/common/SerialStorage.h
index 6f382134a72..d71c7dd07d5 100644
--- a/chromium/third_party/dawn/src/common/SerialStorage.h
+++ b/chromium/third_party/dawn/src/common/SerialStorage.h
@@ -280,8 +280,8 @@ SerialStorage<Derived>::ConstIterator::ConstIterator(
}
template <typename Derived>
-typename SerialStorage<Derived>::ConstIterator& SerialStorage<Derived>::ConstIterator::
-operator++() {
+typename SerialStorage<Derived>::ConstIterator&
+SerialStorage<Derived>::ConstIterator::operator++() {
const Value* vectorData = mStorageIterator->second.data();
if (mSerialIterator == nullptr) {
diff --git a/chromium/third_party/dawn/src/common/StackContainer.h b/chromium/third_party/dawn/src/common/StackContainer.h
new file mode 100644
index 00000000000..be3cf32d0de
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/StackContainer.h
@@ -0,0 +1,262 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is a modified copy of Chromium's /src/base/containers/stack_container.h
+
+#ifndef COMMON_STACKCONTAINER_H_
+#define COMMON_STACKCONTAINER_H_
+
+#include "common/Compiler.h"
+
+#include <cstddef>
+#include <vector>
+
+// This allocator can be used with STL containers to provide a stack buffer
+// from which to allocate memory, overflowing onto the heap once that buffer
+// is exhausted. The buffer itself lives on the stack, which lets us avoid
+// heap operations in some situations.
+//
+// STL likes to make copies of allocators, so the allocator itself can't hold
+// the data. Instead, we make the creator responsible for creating a
+// StackAllocator::Source which contains the data. Copying the allocator
+// merely copies the pointer to this shared source, so all allocators created
+// based on our allocator will share the same stack buffer.
+//
+// This stack buffer implementation is very simple. The first allocation that
+// fits in the stack buffer will use the stack buffer. Any subsequent
+// allocations will not use the stack buffer, even if there is unused room.
+// This makes it appropriate for array-like containers, but the caller should
+// be sure to reserve() in the container up to the stack buffer size. Otherwise
+// the container will allocate a small array which will "use up" the stack
+// buffer.
+template <typename T, size_t stack_capacity>
+class StackAllocator : public std::allocator<T> {
+ public:
+ typedef typename std::allocator<T>::pointer pointer;
+ typedef typename std::allocator<T>::size_type size_type;
+
+ // Backing store for the allocator. The container owner is responsible for
+ // maintaining this for as long as any containers using this allocator are
+ // live.
+ struct Source {
+ Source() : used_stack_buffer_(false) {
+ }
+
+ // Casts the buffer in its right type.
+ T* stack_buffer() {
+ return reinterpret_cast<T*>(stack_buffer_);
+ }
+ const T* stack_buffer() const {
+ return reinterpret_cast<const T*>(&stack_buffer_);
+ }
+
+ // The buffer itself. It is not of type T because we don't want the
+ // constructors and destructors to be automatically called. Define a POD
+ // buffer of the right size instead.
+ alignas(T) char stack_buffer_[sizeof(T[stack_capacity])];
+#if defined(DAWN_COMPILER_GCC) && !defined(__x86_64__) && !defined(__i386__)
+ static_assert(alignof(T) <= 16, "http://crbug.com/115612");
+#endif
+
+ // Set when the stack buffer is used for an allocation. We do not track
+ // how much of the buffer is used, only that somebody is using it.
+ bool used_stack_buffer_;
+ };
+
+ // Used by containers when they want to refer to an allocator of type U.
+ template <typename U>
+ struct rebind {
+ typedef StackAllocator<U, stack_capacity> other;
+ };
+
+ // For the straight up copy c-tor, we can share storage.
+ StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
+ : std::allocator<T>(), source_(rhs.source_) {
+ }
+
+ // ISO C++ requires the following constructor to be defined,
+ // and std::vector in VC++2008SP1 Release fails with an error
+ // in the class _Container_base_aux_alloc_real (from <xutility>)
+ // if the constructor does not exist.
+ // For this constructor, we cannot share storage; there's
+ // no guarantee that the Source buffer of Ts is large enough
+ // for Us.
+ // TODO: If we were fancy pants, perhaps we could share storage
+ // iff sizeof(T) == sizeof(U).
+ template <typename U, size_t other_capacity>
+ StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {
+ }
+
+ // This constructor must exist. It creates a default allocator that doesn't
+ // actually have a stack buffer. glibc's std::string() will compare the
+ // current allocator against the default-constructed allocator, so this
+ // should be fast.
+ StackAllocator() : source_(nullptr) {
+ }
+
+ explicit StackAllocator(Source* source) : source_(source) {
+ }
+
+ // Actually do the allocation. Use the stack buffer if nobody has used it yet
+ // and the size requested fits. Otherwise, fall through to the standard
+ // allocator.
+ pointer allocate(size_type n) {
+ if (source_ && !source_->used_stack_buffer_ && n <= stack_capacity) {
+ source_->used_stack_buffer_ = true;
+ return source_->stack_buffer();
+ } else {
+ return std::allocator<T>::allocate(n);
+ }
+ }
+
+  // Free: when trying to free the stack buffer, just mark it as free. For
+  // non-stack-buffer pointers, fall through to the standard allocator.
+ void deallocate(pointer p, size_type n) {
+ if (source_ && p == source_->stack_buffer())
+ source_->used_stack_buffer_ = false;
+ else
+ std::allocator<T>::deallocate(p, n);
+ }
+
+ private:
+ Source* source_;
+};
+
+// A wrapper around STL containers that maintains a stack-sized buffer that the
+// initial capacity of the vector is based on. Growing the container beyond the
+// stack capacity will transparently overflow onto the heap. The container must
+// support reserve().
+//
+// This will not work with std::string since some implementations allocate
+// more bytes than requested in calls to reserve(), forcing the allocation onto
+// the heap. http://crbug.com/709273
+//
+// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
+// type. This object is really intended to be used only internally. You'll want
+// to use the wrappers below for different types.
+template <typename TContainerType, size_t stack_capacity>
+class StackContainer {
+ public:
+ typedef TContainerType ContainerType;
+ typedef typename ContainerType::value_type ContainedType;
+ typedef StackAllocator<ContainedType, stack_capacity> Allocator;
+
+ // Allocator must be constructed before the container!
+ StackContainer() : allocator_(&stack_data_), container_(allocator_) {
+ // Make the container use the stack allocation by reserving our buffer size
+ // before doing anything else.
+ container_.reserve(stack_capacity);
+ }
+
+ // Getters for the actual container.
+ //
+ // Danger: any copies of this made using the copy constructor must have
+ // shorter lifetimes than the source. The copy will share the same allocator
+ // and therefore the same stack buffer as the original. Use std::copy to
+ // copy into a "real" container for longer-lived objects.
+ ContainerType& container() {
+ return container_;
+ }
+ const ContainerType& container() const {
+ return container_;
+ }
+
+ // Support operator-> to get to the container. This allows nicer syntax like:
+ // StackContainer<...> foo;
+ // std::sort(foo->begin(), foo->end());
+ ContainerType* operator->() {
+ return &container_;
+ }
+ const ContainerType* operator->() const {
+ return &container_;
+ }
+
+  // Retrieves the stack source so that unit tests can verify that the
+  // buffer is being used properly.
+ const typename Allocator::Source& stack_data() const {
+ return stack_data_;
+ }
+
+ protected:
+ typename Allocator::Source stack_data_;
+ Allocator allocator_;
+ ContainerType container_;
+
+ private:
+ StackContainer(const StackContainer& rhs) = delete;
+ StackContainer& operator=(const StackContainer& rhs) = delete;
+ StackContainer(StackContainer&& rhs) = delete;
+ StackContainer& operator=(StackContainer&& rhs) = delete;
+};
+
+// Range-based iteration support for StackContainer.
+template <typename TContainerType, size_t stack_capacity>
+auto begin(const StackContainer<TContainerType, stack_capacity>& stack_container)
+ -> decltype(begin(stack_container.container())) {
+ return begin(stack_container.container());
+}
+
+template <typename TContainerType, size_t stack_capacity>
+auto begin(StackContainer<TContainerType, stack_capacity>& stack_container)
+ -> decltype(begin(stack_container.container())) {
+ return begin(stack_container.container());
+}
+
+template <typename TContainerType, size_t stack_capacity>
+auto end(StackContainer<TContainerType, stack_capacity>& stack_container)
+ -> decltype(end(stack_container.container())) {
+ return end(stack_container.container());
+}
+
+template <typename TContainerType, size_t stack_capacity>
+auto end(const StackContainer<TContainerType, stack_capacity>& stack_container)
+ -> decltype(end(stack_container.container())) {
+ return end(stack_container.container());
+}
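+
+// With these overloads, range-based for works directly on a StackContainer
+// wrapper, e.g. (sketch): for (int v : my_stack_vector) { ... } binds to the
+// begin()/end() defined above.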
+
+// StackVector -----------------------------------------------------------------
+
+// Example:
+// StackVector<int, 16> foo;
+// foo->push_back(22); // we have overloaded operator->
+// foo[0] = 10; // as well as operator[]
+template <typename T, size_t stack_capacity>
+class StackVector
+ : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity> {
+ public:
+ StackVector()
+ : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
+ }
+
+ // We need to put this in STL containers sometimes, which requires a copy
+ // constructor. We can't call the regular copy constructor because that will
+ // take the stack buffer from the original. Here, we create an empty object
+ // and make a stack buffer of its own.
+ StackVector(const StackVector<T, stack_capacity>& other)
+ : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
+ this->container().assign(other->begin(), other->end());
+ }
+
+ StackVector<T, stack_capacity>& operator=(const StackVector<T, stack_capacity>& other) {
+ this->container().assign(other->begin(), other->end());
+ return *this;
+ }
+
+  // Vectors are commonly indexed, which isn't very convenient even with
+  // operator-> ("->at()" does bounds checking and can throw, which we don't want).
+ T& operator[](size_t i) {
+ return this->container().operator[](i);
+ }
+ const T& operator[](size_t i) const {
+ return this->container().operator[](i);
+ }
+
+ private:
+ // StackVector(const StackVector& rhs) = delete;
+ // StackVector& operator=(const StackVector& rhs) = delete;
+ StackVector(StackVector&& rhs) = delete;
+ StackVector& operator=(StackVector&& rhs) = delete;
+};
+
+#endif // COMMON_STACKCONTAINER_H_
diff --git a/chromium/third_party/dawn/src/common/SystemUtils.cpp b/chromium/third_party/dawn/src/common/SystemUtils.cpp
index f8282eb4141..58aac01c443 100644
--- a/chromium/third_party/dawn/src/common/SystemUtils.cpp
+++ b/chromium/third_party/dawn/src/common/SystemUtils.cpp
@@ -108,6 +108,11 @@ std::string GetExecutablePath() {
// TODO: Implement on Fuchsia
return "";
}
+#elif defined(DAWN_PLATFORM_EMSCRIPTEN)
+std::string GetExecutablePath() {
+ UNREACHABLE();
+ return "";
+}
#else
# error "Implement GetExecutablePath for your platform."
#endif
diff --git a/chromium/third_party/dawn/src/common/ityp_array.h b/chromium/third_party/dawn/src/common/ityp_array.h
index d413ebc0ba0..fc772178ccc 100644
--- a/chromium/third_party/dawn/src/common/ityp_array.h
+++ b/chromium/third_party/dawn/src/common/ityp_array.h
@@ -65,19 +65,19 @@ namespace ityp {
return Base::at(index);
}
- Value* begin() noexcept {
+ typename Base::iterator begin() noexcept {
return Base::begin();
}
- const Value* begin() const noexcept {
+ typename Base::const_iterator begin() const noexcept {
return Base::begin();
}
- Value* end() noexcept {
+ typename Base::iterator end() noexcept {
return Base::end();
}
- const Value* end() const noexcept {
+ typename Base::const_iterator end() const noexcept {
return Base::end();
}
diff --git a/chromium/third_party/dawn/src/common/ityp_bitset.h b/chromium/third_party/dawn/src/common/ityp_bitset.h
index ef351d47d76..339cf182937 100644
--- a/chromium/third_party/dawn/src/common/ityp_bitset.h
+++ b/chromium/third_party/dawn/src/common/ityp_bitset.h
@@ -126,7 +126,7 @@ namespace ityp {
return BitSetIterator<N, Index>(static_cast<const Base&>(bitset));
}
- friend class std::hash<bitset>;
+ friend struct std::hash<bitset>;
};
} // namespace ityp
diff --git a/chromium/third_party/dawn/src/common/ityp_stack_vec.h b/chromium/third_party/dawn/src/common/ityp_stack_vec.h
new file mode 100644
index 00000000000..b88888b7897
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/ityp_stack_vec.h
@@ -0,0 +1,103 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_STACK_VEC_H_
+#define COMMON_ITYP_STACK_VEC_H_
+
+#include "common/Assert.h"
+#include "common/StackContainer.h"
+#include "common/UnderlyingType.h"
+
+namespace ityp {
+
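+    // stack_vec is a StackVector whose sizes and indices must use the typed
+    // integer |Index|. A usage sketch (BindingIndex stands for any TypedInteger):
+    //   stack_vec<BindingIndex, ObjectBase*, 4> vec(BindingIndex(2));
+    //   vec[BindingIndex(0)] = nullptr;  // a raw uint32_t index would not compile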
+ template <typename Index, typename Value, size_t StaticCapacity>
+ class stack_vec : private StackVector<Value, StaticCapacity> {
+ using I = UnderlyingType<Index>;
+ using Base = StackVector<Value, StaticCapacity>;
+ using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>;
+ static_assert(StaticCapacity <= std::numeric_limits<I>::max(), "");
+
+ public:
+ stack_vec() : Base() {
+ }
+ stack_vec(Index size) : Base() {
+ this->container().resize(static_cast<I>(size));
+ }
+
+ Value& operator[](Index i) {
+ ASSERT(i < size());
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ constexpr const Value& operator[](Index i) const {
+ ASSERT(i < size());
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ void resize(Index size) {
+ this->container().resize(static_cast<I>(size));
+ }
+
+ void reserve(Index size) {
+ this->container().reserve(static_cast<I>(size));
+ }
+
+ Value* data() {
+ return this->container().data();
+ }
+
+ const Value* data() const {
+ return this->container().data();
+ }
+
+ typename VectorBase::iterator begin() noexcept {
+ return this->container().begin();
+ }
+
+ typename VectorBase::const_iterator begin() const noexcept {
+ return this->container().begin();
+ }
+
+ typename VectorBase::iterator end() noexcept {
+ return this->container().end();
+ }
+
+ typename VectorBase::const_iterator end() const noexcept {
+ return this->container().end();
+ }
+
+ typename VectorBase::reference front() {
+ return this->container().front();
+ }
+
+ typename VectorBase::const_reference front() const {
+ return this->container().front();
+ }
+
+ typename VectorBase::reference back() {
+ return this->container().back();
+ }
+
+ typename VectorBase::const_reference back() const {
+ return this->container().back();
+ }
+
+ Index size() const {
+ return Index(static_cast<I>(this->container().size()));
+ }
+ };
+
+} // namespace ityp
+
+#endif // COMMON_ITYP_STACK_VEC_H_
diff --git a/chromium/third_party/dawn/src/common/ityp_vector.h b/chromium/third_party/dawn/src/common/ityp_vector.h
new file mode 100644
index 00000000000..a747d5aeb53
--- /dev/null
+++ b/chromium/third_party/dawn/src/common/ityp_vector.h
@@ -0,0 +1,108 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COMMON_ITYP_VECTOR_H_
+#define COMMON_ITYP_VECTOR_H_
+
+#include "common/TypedInteger.h"
+#include "common/UnderlyingType.h"
+
+#include <type_traits>
+#include <vector>
+
+namespace ityp {
+
+ // ityp::vector is a helper class that wraps std::vector with the restriction that
+ // indices must be a particular type |Index|.
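+    // A usage sketch (BindingIndex stands for any TypedInteger):
+    //   vector<BindingIndex, uint64_t> sizes(BindingIndex(3));
+    //   sizes[BindingIndex(1)] = 256;  // sizes[1u] would not compile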
+ template <typename Index, typename Value>
+ class vector : public std::vector<Value> {
+ using I = UnderlyingType<Index>;
+ using Base = std::vector<Value>;
+
+ private:
+ // Disallow access to base constructors and untyped index/size-related operators.
+ using Base::Base;
+ using Base::operator=;
+ using Base::operator[];
+ using Base::at;
+ using Base::reserve;
+ using Base::resize;
+ using Base::size;
+
+ public:
+ vector() : Base() {
+ }
+
+ explicit vector(Index size) : Base(static_cast<I>(size)) {
+ }
+
+ vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {
+ }
+
+ vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {
+ }
+
+ vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {
+ }
+
+ vector(std::initializer_list<Value> init) : Base(init) {
+ }
+
+ vector& operator=(const vector& rhs) {
+ Base::operator=(static_cast<const Base&>(rhs));
+ return *this;
+ }
+
+ vector& operator=(vector&& rhs) noexcept {
+ Base::operator=(static_cast<Base&&>(rhs));
+ return *this;
+ }
+
+ Value& operator[](Index i) {
+ ASSERT(i >= Index(0) && i < size());
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ constexpr const Value& operator[](Index i) const {
+ ASSERT(i >= Index(0) && i < size());
+ return Base::operator[](static_cast<I>(i));
+ }
+
+ Value& at(Index i) {
+ ASSERT(i >= Index(0) && i < size());
+ return Base::at(static_cast<I>(i));
+ }
+
+ constexpr const Value& at(Index i) const {
+ ASSERT(i >= Index(0) && i < size());
+ return Base::at(static_cast<I>(i));
+ }
+
+ constexpr Index size() const {
+ ASSERT(std::numeric_limits<I>::max() >= Base::size());
+ return Index(static_cast<I>(Base::size()));
+ }
+
+ void resize(Index size) {
+ Base::resize(static_cast<I>(size));
+ }
+
+ void reserve(Index size) {
+ Base::reserve(static_cast<I>(size));
+ }
+ };
+
+} // namespace ityp
+
+#endif // COMMON_ITYP_VECTOR_H_
diff --git a/chromium/third_party/dawn/src/common/vulkan_platform.h b/chromium/third_party/dawn/src/common/vulkan_platform.h
index 113e831f432..236c68236c0 100644
--- a/chromium/third_party/dawn/src/common/vulkan_platform.h
+++ b/chromium/third_party/dawn/src/common/vulkan_platform.h
@@ -37,8 +37,18 @@
#if defined(DAWN_PLATFORM_64_BIT)
# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*;
+// This function is needed because MSVC doesn't accept reinterpret_cast from uint64_t to uint64_t
+// TODO(cwallez@chromium.org): Remove this once we rework vulkan_platform.h
+template <typename T>
+T NativeNonDispatachableHandleFromU64(uint64_t u64) {
+ return reinterpret_cast<T>(u64);
+}
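+// Usage sketch: VkFence fence = NativeNonDispatachableHandleFromU64<VkFence>(u64)
+// works on both 64-bit (where VkFence is a pointer type) and 32-bit (where it is
+// uint64_t and a reinterpret_cast would be rejected).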
#elif defined(DAWN_PLATFORM_32_BIT)
# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t;
+template <typename T>
+T NativeNonDispatachableHandleFromU64(uint64_t u64) {
+ return u64;
+}
#else
# error "Unsupported platform"
#endif
diff --git a/chromium/third_party/dawn/src/dawn/BUILD.gn b/chromium/third_party/dawn/src/dawn/BUILD.gn
index d095e9aab5f..9034be436f0 100644
--- a/chromium/third_party/dawn/src/dawn/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn/BUILD.gn
@@ -39,9 +39,7 @@ dawn_json_generator("emscripten_bits_gen") {
source_set("dawn_headers") {
all_dependent_configs = [ "${dawn_root}/src/common:dawn_public_include_dirs" ]
- public_deps = [
- ":dawn_headers_gen",
- ]
+ public_deps = [ ":dawn_headers_gen" ]
sources = get_target_outputs(":dawn_headers_gen")
sources += [ "${dawn_root}/src/include/dawn/dawn_wsi.h" ]
@@ -53,9 +51,7 @@ source_set("dawn_headers") {
dawn_json_generator("dawncpp_headers_gen") {
target = "dawncpp_headers"
- outputs = [
- "src/include/dawn/webgpu_cpp.h",
- ]
+ outputs = [ "src/include/dawn/webgpu_cpp.h" ]
}
source_set("dawncpp_headers") {
@@ -74,9 +70,7 @@ source_set("dawncpp_headers") {
dawn_json_generator("dawncpp_gen") {
target = "dawncpp"
- outputs = [
- "src/dawn/webgpu_cpp.cpp",
- ]
+ outputs = [ "src/dawn/webgpu_cpp.cpp" ]
}
source_set("dawncpp") {
@@ -93,20 +87,14 @@ source_set("dawncpp") {
dawn_json_generator("dawn_proc_gen") {
target = "dawn_proc"
- outputs = [
- "src/dawn/dawn_proc.c",
- ]
+ outputs = [ "src/dawn/dawn_proc.c" ]
}
dawn_component("dawn_proc") {
DEFINE_PREFIX = "WGPU"
- public_deps = [
- ":dawn_headers",
- ]
- deps = [
- ":dawn_proc_gen",
- ]
+ public_deps = [ ":dawn_headers" ]
+ deps = [ ":dawn_proc_gen" ]
sources = get_target_outputs(":dawn_proc_gen")
sources += [ "${dawn_root}/src/include/dawn/dawn_proc.h" ]
}
diff --git a/chromium/third_party/dawn/src/dawn_native/BUILD.gn b/chromium/third_party/dawn/src/dawn_native/BUILD.gn
index f6974c8a3d0..cecfc1e903e 100644
--- a/chromium/third_party/dawn/src/dawn_native/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn_native/BUILD.gn
@@ -70,10 +70,7 @@ config("dawn_native_internal") {
config("dawn_native_weak_framework") {
if (is_mac && dawn_enable_metal) {
- ldflags = [
- "-weak_framework",
- "Metal",
- ]
+ weak_frameworks = [ "Metal.framework" ]
}
}
@@ -163,6 +160,7 @@ source_set("dawn_native_sources") {
"BindGroupLayout.cpp",
"BindGroupLayout.h",
"BindGroupTracker.h",
+ "BindingInfo.cpp",
"BindingInfo.h",
"BuddyAllocator.cpp",
"BuddyAllocator.h",
@@ -194,6 +192,8 @@ source_set("dawn_native_sources") {
"DynamicUploader.h",
"EncodingContext.cpp",
"EncodingContext.h",
+ "EnumClassBitmasks.h",
+ "EnumMaskIterator.h",
"Error.cpp",
"Error.h",
"ErrorData.cpp",
@@ -228,6 +228,8 @@ source_set("dawn_native_sources") {
"Pipeline.h",
"PipelineLayout.cpp",
"PipelineLayout.h",
+ "PooledResourceMemoryAllocator.cpp",
+ "PooledResourceMemoryAllocator.h",
"ProgrammablePassEncoder.cpp",
"ProgrammablePassEncoder.h",
"QuerySet.cpp",
@@ -320,6 +322,8 @@ source_set("dawn_native_sources") {
"d3d12/PipelineLayoutD3D12.h",
"d3d12/PlatformFunctions.cpp",
"d3d12/PlatformFunctions.h",
+ "d3d12/QuerySetD3D12.cpp",
+ "d3d12/QuerySetD3D12.h",
"d3d12/QueueD3D12.cpp",
"d3d12/QueueD3D12.h",
"d3d12/RenderPassBuilderD3D12.cpp",
@@ -357,7 +361,7 @@ source_set("dawn_native_sources") {
}
if (dawn_enable_metal) {
- libs += [
+ frameworks = [
"Cocoa.framework",
"IOKit.framework",
"IOSurface.framework",
@@ -495,6 +499,8 @@ source_set("dawn_native_sources") {
"vulkan/NativeSwapChainImplVk.h",
"vulkan/PipelineLayoutVk.cpp",
"vulkan/PipelineLayoutVk.h",
+ "vulkan/QuerySetVk.cpp",
+ "vulkan/QuerySetVk.h",
"vulkan/QueueVk.cpp",
"vulkan/QueueVk.h",
"vulkan/RenderPassCache.cpp",
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
index 4e4e67a447a..71e8a28d78d 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroup.cpp
@@ -32,7 +32,8 @@ namespace dawn_native {
MaybeError ValidateBufferBinding(const DeviceBase* device,
const BindGroupEntry& entry,
wgpu::BufferUsage requiredUsage,
- const BindingInfo& bindingInfo) {
+ const BindingInfo& bindingInfo,
+ const uint64_t maxBindingSize) {
if (entry.buffer == nullptr || entry.sampler != nullptr ||
entry.textureView != nullptr) {
return DAWN_VALIDATION_ERROR("expected buffer binding");
@@ -79,6 +80,14 @@ namespace dawn_native {
" bytes");
}
+            if (bindingSize > maxBindingSize) {
+                return DAWN_VALIDATION_ERROR(
+                    "Binding size bigger than the maximum binding size: binding " +
+                    std::to_string(entry.binding) + " given " + std::to_string(bindingSize) +
+                    " bytes, maximum is " + std::to_string(maxBindingSize) + " bytes");
+            }
+ }
+
return {};
}
@@ -169,8 +178,9 @@ namespace dawn_native {
}
const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
+ ASSERT(bindingMap.size() <= kMaxBindingsPerPipelineLayout);
- ityp::bitset<BindingIndex, kMaxBindingsPerGroup> bindingsSet;
+ ityp::bitset<BindingIndex, kMaxBindingsPerPipelineLayout> bindingsSet;
for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
const BindGroupEntry& entry = descriptor->entries[i];
@@ -192,12 +202,13 @@ namespace dawn_native {
switch (bindingInfo.type) {
case wgpu::BindingType::UniformBuffer:
DAWN_TRY(ValidateBufferBinding(device, entry, wgpu::BufferUsage::Uniform,
- bindingInfo));
+ bindingInfo, kMaxUniformBufferBindingSize));
break;
case wgpu::BindingType::StorageBuffer:
case wgpu::BindingType::ReadonlyStorageBuffer:
DAWN_TRY(ValidateBufferBinding(device, entry, wgpu::BufferUsage::Storage,
- bindingInfo));
+ bindingInfo,
+ std::numeric_limits<uint64_t>::max()));
break;
case wgpu::BindingType::SampledTexture:
DAWN_TRY(ValidateTextureBinding(device, entry, wgpu::TextureUsage::Sampled,
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h b/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h
index e17d26349f1..e34a16511a2 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupAndStorageBarrierTracker.h
@@ -16,6 +16,7 @@
#define DAWNNATIVE_BINDGROUPANDSTORAGEBARRIERTRACKER_H_
#include "common/ityp_bitset.h"
+#include "common/ityp_stack_vec.h"
#include "dawn_native/BindGroup.h"
#include "dawn_native/BindGroupTracker.h"
#include "dawn_native/Buffer.h"
@@ -39,11 +40,12 @@ namespace dawn_native {
ASSERT(index < kMaxBindGroupsTyped);
if (this->mBindGroups[index] != bindGroup) {
- mBindings[index] = {};
- mBindingsNeedingBarrier[index] = {};
-
const BindGroupLayoutBase* layout = bindGroup->GetLayout();
+ mBindings[index].resize(layout->GetBindingCount());
+ mBindingTypes[index].resize(layout->GetBindingCount());
+ mBindingsNeedingBarrier[index] = {};
+
for (BindingIndex bindingIndex{0}; bindingIndex < layout->GetBindingCount();
++bindingIndex) {
const BindingInfo& bindingInfo = layout->GetBindingInfo(bindingIndex);
@@ -91,15 +93,16 @@ namespace dawn_native {
}
protected:
- ityp::
- array<BindGroupIndex, ityp::bitset<BindingIndex, kMaxBindingsPerGroup>, kMaxBindGroups>
- mBindingsNeedingBarrier = {};
ityp::array<BindGroupIndex,
- ityp::array<BindingIndex, wgpu::BindingType, kMaxBindingsPerGroup>,
+ ityp::bitset<BindingIndex, kMaxBindingsPerPipelineLayout>,
+ kMaxBindGroups>
+ mBindingsNeedingBarrier = {};
+ ityp::array<BindGroupIndex,
+ ityp::stack_vec<BindingIndex, wgpu::BindingType, kMaxOptimalBindingsPerGroup>,
kMaxBindGroups>
mBindingTypes = {};
ityp::array<BindGroupIndex,
- ityp::array<BindingIndex, ObjectBase*, kMaxBindingsPerGroup>,
+ ityp::stack_vec<BindingIndex, ObjectBase*, kMaxOptimalBindingsPerGroup>,
kMaxBindGroups>
mBindings = {};
};
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
index fce8b903293..9c5c8dca1fc 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.cpp
@@ -17,6 +17,7 @@
#include "common/BitSetIterator.h"
#include "common/HashUtils.h"
#include "dawn_native/Device.h"
+#include "dawn_native/PerStage.h"
#include "dawn_native/ValidationUtils_autogen.h"
#include <algorithm>
@@ -178,6 +179,8 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("3D texture bindings may not be multisampled");
case wgpu::TextureViewDimension::e1D:
+ return DAWN_VALIDATION_ERROR("1D texture bindings may not be multisampled");
+
case wgpu::TextureViewDimension::Undefined:
default:
UNREACHABLE();
@@ -194,8 +197,7 @@ namespace dawn_native {
}
std::set<BindingNumber> bindingsSet;
- uint32_t dynamicUniformBufferCount = 0;
- uint32_t dynamicStorageBufferCount = 0;
+ BindingCounts bindingCounts = {};
for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
const BindGroupLayoutEntry& entry = descriptor->entries[i];
BindingNumber bindingNumber = BindingNumber(entry.binding);
@@ -226,45 +228,39 @@ namespace dawn_native {
switch (entry.type) {
case wgpu::BindingType::UniformBuffer:
- if (entry.hasDynamicOffset) {
- ++dynamicUniformBufferCount;
- }
- break;
case wgpu::BindingType::StorageBuffer:
case wgpu::BindingType::ReadonlyStorageBuffer:
+ break;
+ case wgpu::BindingType::SampledTexture:
if (entry.hasDynamicOffset) {
- ++dynamicStorageBufferCount;
+ return DAWN_VALIDATION_ERROR("Sampled textures cannot be dynamic");
}
break;
- case wgpu::BindingType::SampledTexture:
case wgpu::BindingType::Sampler:
case wgpu::BindingType::ComparisonSampler:
+ if (entry.hasDynamicOffset) {
+ return DAWN_VALIDATION_ERROR("Samplers cannot be dynamic");
+ }
+ break;
case wgpu::BindingType::ReadonlyStorageTexture:
case wgpu::BindingType::WriteonlyStorageTexture:
if (entry.hasDynamicOffset) {
- return DAWN_VALIDATION_ERROR("Samplers and textures cannot be dynamic");
+ return DAWN_VALIDATION_ERROR("Storage textures cannot be dynamic");
}
break;
case wgpu::BindingType::StorageTexture:
return DAWN_VALIDATION_ERROR("storage textures aren't supported (yet)");
+ default:
+ UNREACHABLE();
+ break;
}
- bindingsSet.insert(bindingNumber);
- }
-
- if (bindingsSet.size() > kMaxBindingsPerGroup) {
- return DAWN_VALIDATION_ERROR("The number of bindings exceeds kMaxBindingsPerGroup.");
- }
+ IncrementBindingCounts(&bindingCounts, entry);
- if (dynamicUniformBufferCount > kMaxDynamicUniformBufferCount) {
- return DAWN_VALIDATION_ERROR(
- "The number of dynamic uniform buffer exceeds the maximum value");
+ bindingsSet.insert(bindingNumber);
}
- if (dynamicStorageBufferCount > kMaxDynamicStorageBufferCount) {
- return DAWN_VALIDATION_ERROR(
- "The number of dynamic storage buffer exceeds the maximum value");
- }
+ DAWN_TRY(ValidateBindingCounts(bindingCounts));
return {};
}
@@ -360,8 +356,6 @@ namespace dawn_native {
// This is a utility function to help ASSERT that the BGL-binding comparator places buffers
// first.
bool CheckBufferBindingsFirst(ityp::span<BindingIndex, const BindingInfo> bindings) {
- ASSERT(bindings.size() <= BindingIndex(kMaxBindingsPerGroup));
-
BindingIndex lastBufferIndex{0};
BindingIndex firstNonBufferIndex = std::numeric_limits<BindingIndex>::max();
for (BindingIndex i{0}; i < bindings.size(); ++i) {
@@ -383,13 +377,12 @@ namespace dawn_native {
BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
const BindGroupLayoutDescriptor* descriptor)
- : CachedObject(device), mBindingCount(descriptor->entryCount) {
+ : CachedObject(device), mBindingInfo(BindingIndex(descriptor->entryCount)) {
std::vector<BindGroupLayoutEntry> sortedBindings(
descriptor->entries, descriptor->entries + descriptor->entryCount);
-
std::sort(sortedBindings.begin(), sortedBindings.end(), SortBindingsCompare);
- for (BindingIndex i{0}; i < mBindingCount; ++i) {
+ for (BindingIndex i{0}; i < mBindingInfo.size(); ++i) {
const BindGroupLayoutEntry& binding = sortedBindings[static_cast<uint32_t>(i)];
mBindingInfo[i].binding = BindingNumber(binding.binding);
mBindingInfo[i].type = binding.type;
@@ -399,53 +392,26 @@ namespace dawn_native {
mBindingInfo[i].storageTextureFormat = binding.storageTextureFormat;
mBindingInfo[i].minBufferBindingSize = binding.minBufferBindingSize;
- switch (binding.type) {
- case wgpu::BindingType::UniformBuffer:
- case wgpu::BindingType::StorageBuffer:
- case wgpu::BindingType::ReadonlyStorageBuffer:
- // Buffers must be contiguously packed at the start of the binding info.
- ASSERT(mBufferCount == i);
- ++mBufferCount;
- if (binding.minBufferBindingSize == 0) {
- ++mUnverifiedBufferCount;
- }
- break;
- default:
- break;
- }
-
if (binding.viewDimension == wgpu::TextureViewDimension::Undefined) {
- mBindingInfo[i].viewDimension = wgpu::TextureViewDimension::e2D;
+ mBindingInfo[i].viewDimension = wgpu::TextureViewDimension::e2D;
} else {
mBindingInfo[i].viewDimension = binding.viewDimension;
}
mBindingInfo[i].multisampled = binding.multisampled;
mBindingInfo[i].hasDynamicOffset = binding.hasDynamicOffset;
- if (binding.hasDynamicOffset) {
- switch (binding.type) {
- case wgpu::BindingType::UniformBuffer:
- ++mDynamicUniformBufferCount;
- break;
- case wgpu::BindingType::StorageBuffer:
- case wgpu::BindingType::ReadonlyStorageBuffer:
- ++mDynamicStorageBufferCount;
- break;
- case wgpu::BindingType::SampledTexture:
- case wgpu::BindingType::Sampler:
- case wgpu::BindingType::ComparisonSampler:
- case wgpu::BindingType::StorageTexture:
- case wgpu::BindingType::ReadonlyStorageTexture:
- case wgpu::BindingType::WriteonlyStorageTexture:
- UNREACHABLE();
- break;
- }
+
+ if (IsBufferBinding(binding.type)) {
+ // Buffers must be contiguously packed at the start of the binding info.
+ ASSERT(GetBufferCount() == i);
}
+ IncrementBindingCounts(&mBindingCounts, binding);
const auto& it = mBindingMap.emplace(BindingNumber(binding.binding), i);
ASSERT(it.second);
}
- ASSERT(CheckBufferBindingsFirst({mBindingInfo.data(), mBindingCount}));
+ ASSERT(CheckBufferBindingsFirst({mBindingInfo.data(), GetBindingCount()}));
+ ASSERT(mBindingInfo.size() <= kMaxBindingsPerPipelineLayoutTyped);
}
BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
@@ -501,29 +467,26 @@ namespace dawn_native {
}
BindingIndex BindGroupLayoutBase::GetBindingCount() const {
- return mBindingCount;
+ return mBindingInfo.size();
}
BindingIndex BindGroupLayoutBase::GetBufferCount() const {
- return mBufferCount;
+ return BindingIndex(mBindingCounts.bufferCount);
}
BindingIndex BindGroupLayoutBase::GetDynamicBufferCount() const {
// This is a binding index because dynamic buffers are packed at the front of the binding
// info.
- return static_cast<BindingIndex>(mDynamicStorageBufferCount + mDynamicUniformBufferCount);
+ return static_cast<BindingIndex>(mBindingCounts.dynamicStorageBufferCount +
+ mBindingCounts.dynamicUniformBufferCount);
}
- uint32_t BindGroupLayoutBase::GetDynamicUniformBufferCount() const {
- return mDynamicUniformBufferCount;
- }
-
- uint32_t BindGroupLayoutBase::GetDynamicStorageBufferCount() const {
- return mDynamicStorageBufferCount;
+ uint32_t BindGroupLayoutBase::GetUnverifiedBufferCount() const {
+ return mBindingCounts.unverifiedBufferCount;
}
- uint32_t BindGroupLayoutBase::GetUnverifiedBufferCount() const {
- return mUnverifiedBufferCount;
+ const BindingCounts& BindGroupLayoutBase::GetBindingCountInfo() const {
+ return mBindingCounts;
}
size_t BindGroupLayoutBase::GetBindingDataSize() const {
@@ -532,31 +495,29 @@ namespace dawn_native {
// Followed by:
// |---------buffer size array--------|
// |-uint64_t[mUnverifiedBufferCount]-|
- size_t objectPointerStart = static_cast<uint32_t>(mBufferCount) * sizeof(BufferBindingData);
+ size_t objectPointerStart = mBindingCounts.bufferCount * sizeof(BufferBindingData);
ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
- size_t bufferSizeArrayStart = Align(
- objectPointerStart + static_cast<uint32_t>(mBindingCount) * sizeof(Ref<ObjectBase>),
- sizeof(uint64_t));
+ size_t bufferSizeArrayStart =
+ Align(objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>),
+ sizeof(uint64_t));
ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
- return bufferSizeArrayStart + mUnverifiedBufferCount * sizeof(uint64_t);
+ return bufferSizeArrayStart + mBindingCounts.unverifiedBufferCount * sizeof(uint64_t);
}
BindGroupLayoutBase::BindingDataPointers BindGroupLayoutBase::ComputeBindingDataPointers(
void* dataStart) const {
BufferBindingData* bufferData = reinterpret_cast<BufferBindingData*>(dataStart);
- auto bindings =
- reinterpret_cast<Ref<ObjectBase>*>(bufferData + static_cast<uint32_t>(mBufferCount));
- uint64_t* unverifiedBufferSizes =
- AlignPtr(reinterpret_cast<uint64_t*>(bindings + static_cast<uint32_t>(mBindingCount)),
- sizeof(uint64_t));
+ auto bindings = reinterpret_cast<Ref<ObjectBase>*>(bufferData + mBindingCounts.bufferCount);
+ uint64_t* unverifiedBufferSizes = AlignPtr(
+ reinterpret_cast<uint64_t*>(bindings + mBindingCounts.totalCount), sizeof(uint64_t));
ASSERT(IsPtrAligned(bufferData, alignof(BufferBindingData)));
ASSERT(IsPtrAligned(bindings, alignof(Ref<ObjectBase>)));
ASSERT(IsPtrAligned(unverifiedBufferSizes, alignof(uint64_t)));
- return {{bufferData, mBufferCount},
- {bindings, mBindingCount},
- {unverifiedBufferSizes, mUnverifiedBufferCount}};
+ return {{bufferData, GetBufferCount()},
+ {bindings, GetBindingCount()},
+ {unverifiedBufferSizes, mBindingCounts.unverifiedBufferCount}};
}
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
index 4c3c4c63ca6..958abf6f9fe 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupLayout.h
@@ -18,8 +18,8 @@
#include "common/Constants.h"
#include "common/Math.h"
#include "common/SlabAllocator.h"
-#include "common/ityp_array.h"
#include "common/ityp_span.h"
+#include "common/ityp_vector.h"
#include "dawn_native/BindingInfo.h"
#include "dawn_native/CachedObject.h"
#include "dawn_native/Error.h"
@@ -64,7 +64,7 @@ namespace dawn_native {
const BindingInfo& GetBindingInfo(BindingIndex bindingIndex) const {
ASSERT(!IsError());
- ASSERT(bindingIndex < BindingIndex(kMaxBindingsPerGroup));
+ ASSERT(bindingIndex < mBindingInfo.size());
return mBindingInfo[bindingIndex];
}
const BindingMap& GetBindingMap() const;
@@ -79,13 +79,16 @@ namespace dawn_native {
};
BindingIndex GetBindingCount() const;
+ // Returns |BindingIndex| because buffers are packed at the front.
BindingIndex GetBufferCount() const;
// Returns |BindingIndex| because dynamic buffers are packed at the front.
BindingIndex GetDynamicBufferCount() const;
- uint32_t GetDynamicUniformBufferCount() const;
- uint32_t GetDynamicStorageBufferCount() const;
uint32_t GetUnverifiedBufferCount() const;
+ // Used to get counts and validate them in pipeline layout creation. Other getters
+ // should be used to get typed integer counts.
+ const BindingCounts& GetBindingCountInfo() const;
+
struct BufferBindingData {
uint64_t offset;
uint64_t size;
@@ -120,13 +123,8 @@ namespace dawn_native {
private:
BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
- BindingIndex mBindingCount;
- BindingIndex mBufferCount{0}; // |BindingIndex| because buffers are packed at the front.
- uint32_t mUnverifiedBufferCount = 0; // Buffers with minimum buffer size unspecified
- uint32_t mDynamicUniformBufferCount = 0;
- uint32_t mDynamicStorageBufferCount = 0;
-
- ityp::array<BindingIndex, BindingInfo, kMaxBindingsPerGroup> mBindingInfo;
+ BindingCounts mBindingCounts = {};
+ ityp::vector<BindingIndex, BindingInfo> mBindingInfo;
// Map from BindGroupLayoutEntry.binding to packed indices.
BindingMap mBindingMap;
diff --git a/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h b/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h
index 8d03ebf3614..a3addb27b19 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindGroupTracker.h
@@ -103,7 +103,9 @@ namespace dawn_native {
BindGroupLayoutMask mBindGroupLayoutsMask = 0;
ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindGroups = {};
ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
- ityp::array<BindGroupIndex, std::array<DynamicOffset, kMaxBindingsPerGroup>, kMaxBindGroups>
+ ityp::array<BindGroupIndex,
+ std::array<DynamicOffset, kMaxDynamicBuffersPerPipelineLayout>,
+ kMaxBindGroups>
mDynamicOffsets = {};
// |mPipelineLayout| is the current pipeline layout set on the command buffer.
diff --git a/chromium/third_party/dawn/src/dawn_native/BindingInfo.cpp b/chromium/third_party/dawn/src/dawn_native/BindingInfo.cpp
new file mode 100644
index 00000000000..6ade32bb6b2
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/BindingInfo.cpp
@@ -0,0 +1,137 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/BindingInfo.h"
+
+namespace dawn_native {
+
+ void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry) {
+ bindingCounts->totalCount += 1;
+
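+        // Pointer-to-data-member selecting which PerStageBindingCounts field to
+        // bump; the switch below picks the field and the loop at the end
+        // increments it once per shader stage visible to the entry.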
+ uint32_t PerStageBindingCounts::*perStageBindingCountMember = nullptr;
+ switch (entry.type) {
+ case wgpu::BindingType::UniformBuffer:
+ ++bindingCounts->bufferCount;
+ if (entry.hasDynamicOffset) {
+ ++bindingCounts->dynamicUniformBufferCount;
+ }
+ if (entry.minBufferBindingSize == 0) {
+ ++bindingCounts->unverifiedBufferCount;
+ }
+ perStageBindingCountMember = &PerStageBindingCounts::uniformBufferCount;
+ break;
+
+ case wgpu::BindingType::StorageBuffer:
+ case wgpu::BindingType::ReadonlyStorageBuffer:
+ ++bindingCounts->bufferCount;
+ if (entry.hasDynamicOffset) {
+ ++bindingCounts->dynamicStorageBufferCount;
+ }
+ if (entry.minBufferBindingSize == 0) {
+ ++bindingCounts->unverifiedBufferCount;
+ }
+ perStageBindingCountMember = &PerStageBindingCounts::storageBufferCount;
+ break;
+
+ case wgpu::BindingType::SampledTexture:
+ perStageBindingCountMember = &PerStageBindingCounts::sampledTextureCount;
+ break;
+
+ case wgpu::BindingType::Sampler:
+ case wgpu::BindingType::ComparisonSampler:
+ perStageBindingCountMember = &PerStageBindingCounts::samplerCount;
+ break;
+
+ case wgpu::BindingType::ReadonlyStorageTexture:
+ case wgpu::BindingType::WriteonlyStorageTexture:
+ perStageBindingCountMember = &PerStageBindingCounts::storageTextureCount;
+ break;
+
+ case wgpu::BindingType::StorageTexture:
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ ASSERT(perStageBindingCountMember != nullptr);
+ for (SingleShaderStage stage : IterateStages(entry.visibility)) {
+ ++(bindingCounts->perStage[stage].*perStageBindingCountMember);
+ }
+ }
+
+ void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs) {
+ bindingCounts->totalCount += rhs.totalCount;
+ bindingCounts->bufferCount += rhs.bufferCount;
+ bindingCounts->unverifiedBufferCount += rhs.unverifiedBufferCount;
+ bindingCounts->dynamicUniformBufferCount += rhs.dynamicUniformBufferCount;
+ bindingCounts->dynamicStorageBufferCount += rhs.dynamicStorageBufferCount;
+
+ for (SingleShaderStage stage : IterateStages(kAllStages)) {
+ bindingCounts->perStage[stage].sampledTextureCount +=
+ rhs.perStage[stage].sampledTextureCount;
+ bindingCounts->perStage[stage].samplerCount += rhs.perStage[stage].samplerCount;
+ bindingCounts->perStage[stage].storageBufferCount +=
+ rhs.perStage[stage].storageBufferCount;
+ bindingCounts->perStage[stage].storageTextureCount +=
+ rhs.perStage[stage].storageTextureCount;
+ bindingCounts->perStage[stage].uniformBufferCount +=
+ rhs.perStage[stage].uniformBufferCount;
+ }
+ }
+
+ MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts) {
+ if (bindingCounts.dynamicUniformBufferCount > kMaxDynamicUniformBuffersPerPipelineLayout) {
+ return DAWN_VALIDATION_ERROR(
+ "The number of dynamic uniform buffers exceeds the maximum per-pipeline-layout "
+ "limit");
+ }
+
+ if (bindingCounts.dynamicStorageBufferCount > kMaxDynamicStorageBuffersPerPipelineLayout) {
+ return DAWN_VALIDATION_ERROR(
+ "The number of dynamic storage buffers exceeds the maximum per-pipeline-layout "
+ "limit");
+ }
+
+ for (SingleShaderStage stage : IterateStages(kAllStages)) {
+ if (bindingCounts.perStage[stage].sampledTextureCount >
+ kMaxSampledTexturesPerShaderStage) {
+ return DAWN_VALIDATION_ERROR(
+ "The number of sampled textures exceeds the maximum "
+ "per-stage limit.");
+ }
+ if (bindingCounts.perStage[stage].samplerCount > kMaxSamplersPerShaderStage) {
+ return DAWN_VALIDATION_ERROR(
+ "The number of samplers exceeds the maximum per-stage limit.");
+ }
+ if (bindingCounts.perStage[stage].storageBufferCount >
+ kMaxStorageBuffersPerShaderStage) {
+ return DAWN_VALIDATION_ERROR(
+ "The number of storage buffers exceeds the maximum per-stage limit.");
+ }
+ if (bindingCounts.perStage[stage].storageTextureCount >
+ kMaxStorageTexturesPerShaderStage) {
+ return DAWN_VALIDATION_ERROR(
+ "The number of storage textures exceeds the maximum per-stage limit.");
+ }
+ if (bindingCounts.perStage[stage].uniformBufferCount >
+ kMaxUniformBuffersPerShaderStage) {
+ return DAWN_VALIDATION_ERROR(
+ "The number of uniform buffers exceeds the maximum per-stage limit.");
+ }
+ }
+
+ return {};
+ }
+
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/BindingInfo.h b/chromium/third_party/dawn/src/dawn_native/BindingInfo.h
index cac4f4ccaab..b6cfad15618 100644
--- a/chromium/third_party/dawn/src/dawn_native/BindingInfo.h
+++ b/chromium/third_party/dawn/src/dawn_native/BindingInfo.h
@@ -18,7 +18,10 @@
#include "common/Constants.h"
#include "common/TypedInteger.h"
#include "common/ityp_array.h"
+#include "dawn_native/Error.h"
#include "dawn_native/Format.h"
+#include "dawn_native/PerStage.h"
+
#include "dawn_native/dawn_platform.h"
#include <cstdint>
@@ -33,9 +36,28 @@ namespace dawn_native {
using BindGroupIndex = TypedInteger<struct BindGroupIndexT, uint32_t>;
- static constexpr BindingIndex kMaxBindingsPerGroupTyped = BindingIndex(kMaxBindingsPerGroup);
static constexpr BindGroupIndex kMaxBindGroupsTyped = BindGroupIndex(kMaxBindGroups);
+ // Not a real WebGPU limit, but the sum of the two limits is useful for internal optimizations.
+ static constexpr uint32_t kMaxDynamicBuffersPerPipelineLayout =
+ kMaxDynamicUniformBuffersPerPipelineLayout + kMaxDynamicStorageBuffersPerPipelineLayout;
+
+ static constexpr BindingIndex kMaxDynamicBuffersPerPipelineLayoutTyped =
+ BindingIndex(kMaxDynamicBuffersPerPipelineLayout);
+
+ // Not a real WebGPU limit, but used to optimize parts of Dawn which expect valid usage of the
+ // API. There should never be more bindings than the max per stage, for each stage.
+ static constexpr uint32_t kMaxBindingsPerPipelineLayout =
+ 3 * (kMaxSampledTexturesPerShaderStage + kMaxSamplersPerShaderStage +
+ kMaxStorageBuffersPerShaderStage + kMaxStorageTexturesPerShaderStage +
+ kMaxUniformBuffersPerShaderStage);
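+    // For example, assuming the per-stage limits in common/Constants.h are 16
+    // sampled textures, 16 samplers, 8 storage buffers, 4 storage textures, and
+    // 12 uniform buffers, this works out to 3 * 56 = 168 bindings.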
+
+ static constexpr BindingIndex kMaxBindingsPerPipelineLayoutTyped =
+ BindingIndex(kMaxBindingsPerPipelineLayout);
+
+ // TODO(enga): Figure out a good number for this.
+ static constexpr uint32_t kMaxOptimalBindingsPerGroup = 32;
+
struct BindingInfo {
BindingNumber binding;
wgpu::ShaderStage visibility;
@@ -48,6 +70,27 @@ namespace dawn_native {
uint64_t minBufferBindingSize = 0;
};
+ struct PerStageBindingCounts {
+ uint32_t sampledTextureCount;
+ uint32_t samplerCount;
+ uint32_t storageBufferCount;
+ uint32_t storageTextureCount;
+ uint32_t uniformBufferCount;
+ };
+
+ struct BindingCounts {
+ uint32_t totalCount;
+ uint32_t bufferCount;
+ uint32_t unverifiedBufferCount; // Buffers with minimum buffer size unspecified
+ uint32_t dynamicUniformBufferCount;
+ uint32_t dynamicStorageBufferCount;
+ PerStage<PerStageBindingCounts> perStage;
+ };
+
+ void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry);
+ void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs);
+ MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts);
+
// For buffer size validation
using RequiredBufferSizes = ityp::array<BindGroupIndex, std::vector<uint64_t>, kMaxBindGroups>;
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
index 3c97655fbaa..4dd1f3a0864 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/Buffer.h"
#include "common/Assert.h"
+#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ErrorData.h"
@@ -32,20 +33,21 @@ namespace dawn_native {
class ErrorBuffer final : public BufferBase {
public:
- ErrorBuffer(DeviceBase* device) : BufferBase(device, ObjectBase::kError) {
- }
-
- static ErrorBuffer* MakeMapped(DeviceBase* device,
- uint64_t size,
- uint8_t** mappedPointer) {
- ASSERT(mappedPointer != nullptr);
-
- ErrorBuffer* buffer = new ErrorBuffer(device);
- buffer->mFakeMappedData =
- std::unique_ptr<uint8_t[]>(new (std::nothrow) uint8_t[size]);
- *mappedPointer = buffer->mFakeMappedData.get();
-
- return buffer;
+ ErrorBuffer(DeviceBase* device, const BufferDescriptor* descriptor)
+ : BufferBase(device, descriptor, ObjectBase::kError) {
+ if (descriptor->mappedAtCreation) {
+                // Check that the size can be used to allocate mFakeMappedData. A malloc(0)
+                // is invalid, and on 32-bit systems we should avoid a narrowing conversion
+                // that would make size = (1 << 32) + 1 allocate one byte.
+ bool isValidSize =
+ descriptor->size != 0 &&
+ descriptor->size < uint64_t(std::numeric_limits<size_t>::max());
+
+ if (isValidSize) {
+ mFakeMappedData = std::unique_ptr<uint8_t[]>(new (std::nothrow)
+ uint8_t[descriptor->size]);
+ }
+ }
}
void ClearMappedData() {
@@ -53,21 +55,25 @@ namespace dawn_native {
}
private:
- bool IsMapWritable() const override {
+ bool IsMappableAtCreation() const override {
UNREACHABLE();
return false;
}
- MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override {
+ MaybeError MapAtCreationImpl() override {
UNREACHABLE();
return {};
}
- MaybeError MapReadAsyncImpl(uint32_t serial) override {
+ MaybeError MapReadAsyncImpl() override {
UNREACHABLE();
return {};
}
- MaybeError MapWriteAsyncImpl(uint32_t serial) override {
+ MaybeError MapWriteAsyncImpl() override {
+ UNREACHABLE();
+ return {};
+ }
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
UNREACHABLE();
return {};
}
@@ -107,6 +113,10 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Only CopyDst is allowed with MapRead");
}
+ if (descriptor->mappedAtCreation && descriptor->size % 4 != 0) {
+ return DAWN_VALIDATION_ERROR("size must be aligned to 4 when mappedAtCreation is true");
+ }
+
return {};
}
@@ -125,8 +135,15 @@ namespace dawn_native {
}
}
- BufferBase::BufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
- : ObjectBase(device, tag), mState(BufferState::Unmapped) {
+ BufferBase::BufferBase(DeviceBase* device,
+ const BufferDescriptor* descriptor,
+ ObjectBase::ErrorTag tag)
+ : ObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {
+ if (descriptor->mappedAtCreation) {
+ mState = BufferState::MappedAtCreation;
+ mMapOffset = 0;
+ mMapSize = mSize;
+ }
}
BufferBase::~BufferBase() {
@@ -134,19 +151,13 @@ namespace dawn_native {
ASSERT(!IsError());
CallMapReadCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
CallMapWriteCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
+ CallMapCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown);
}
}
// static
- BufferBase* BufferBase::MakeError(DeviceBase* device) {
- return new ErrorBuffer(device);
- }
-
- // static
- BufferBase* BufferBase::MakeErrorMapped(DeviceBase* device,
- uint64_t size,
- uint8_t** mappedPointer) {
- return ErrorBuffer::MakeMapped(device, size, mappedPointer);
+ BufferBase* BufferBase::MakeError(DeviceBase* device, const BufferDescriptor* descriptor) {
+ return new ErrorBuffer(device, descriptor);
}
uint64_t BufferBase::GetSize() const {
@@ -159,23 +170,21 @@ namespace dawn_native {
return mUsage;
}
- MaybeError BufferBase::MapAtCreation(uint8_t** mappedPointer) {
+ MaybeError BufferBase::MapAtCreation() {
ASSERT(!IsError());
- ASSERT(mappedPointer != nullptr);
-
- // Mappable buffers don't use a staging buffer and are just as if mapped through MapAsync.
- if (IsMapWritable()) {
- DAWN_TRY(MapAtCreationImpl(mappedPointer));
- mState = BufferState::Mapped;
- ASSERT(*mappedPointer != nullptr);
- return {};
- }
-
mState = BufferState::MappedAtCreation;
+ mMapOffset = 0;
+ mMapSize = mSize;
        // 0-sized buffers are not supposed to be written to; return back any non-null pointer.
+ // Handle 0-sized buffers first so we don't try to map them in the backend.
if (mSize == 0) {
- *mappedPointer = reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
+ return {};
+ }
+
+ // Mappable buffers don't use a staging buffer and are just as if mapped through MapAsync.
+ if (IsMappableAtCreation()) {
+ DAWN_TRY(MapAtCreationImpl());
return {};
}
@@ -185,9 +194,6 @@ namespace dawn_native {
// many small buffers.
DAWN_TRY_ASSIGN(mStagingBuffer, GetDevice()->CreateStagingBuffer(GetSize()));
- ASSERT(mStagingBuffer->GetMappedPointer() != nullptr);
- *mappedPointer = reinterpret_cast<uint8_t*>(mStagingBuffer->GetMappedPointer());
-
return {};
}
@@ -249,14 +255,26 @@ namespace dawn_native {
}
}
- void BufferBase::SetSubData(uint32_t start, uint32_t count, const void* data) {
- Ref<QueueBase> queue = AcquireRef(GetDevice()->GetDefaultQueue());
- GetDevice()->EmitDeprecationWarning(
- "Buffer::SetSubData is deprecated, use Queue::WriteBuffer instead");
- queue->WriteBuffer(this, start, data, count);
+ void BufferBase::CallMapCallback(uint32_t serial, WGPUBufferMapAsyncStatus status) {
+ ASSERT(!IsError());
+ if (mMapCallback != nullptr && serial == mMapSerial) {
+            // Tag the callback as fired before firing it; otherwise it could fire a
+            // second time if, for example, buffer.Unmap() is called inside the
+            // application-provided callback.
+ WGPUBufferMapCallback callback = mMapCallback;
+ mMapCallback = nullptr;
+
+ if (GetDevice()->IsLost()) {
+ callback(WGPUBufferMapAsyncStatus_DeviceLost, mMapUserdata);
+ } else {
+ callback(status, mMapUserdata);
+ }
+ }
}
void BufferBase::MapReadAsync(WGPUBufferMapReadCallback callback, void* userdata) {
+ GetDevice()->EmitDeprecationWarning(
+ "Buffer::MapReadAsync is deprecated. Use Buffer::MapAsync instead");
+
WGPUBufferMapAsyncStatus status;
if (GetDevice()->ConsumedError(ValidateMap(wgpu::BufferUsage::MapRead, &status))) {
callback(status, nullptr, 0, userdata);
@@ -270,18 +288,23 @@ namespace dawn_native {
mMapSerial++;
mMapReadCallback = callback;
mMapUserdata = userdata;
+ mMapOffset = 0;
+ mMapSize = mSize;
mState = BufferState::Mapped;
- if (GetDevice()->ConsumedError(MapReadAsyncImpl(mMapSerial))) {
+ if (GetDevice()->ConsumedError(MapReadAsyncImpl())) {
CallMapReadCallback(mMapSerial, WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0);
return;
}
MapRequestTracker* tracker = GetDevice()->GetMapRequestTracker();
- tracker->Track(this, mMapSerial, false);
+ tracker->Track(this, mMapSerial, MapType::Read);
}
void BufferBase::MapWriteAsync(WGPUBufferMapWriteCallback callback, void* userdata) {
+        GetDevice()->EmitDeprecationWarning(
+            "Buffer::MapWriteAsync is deprecated. Use Buffer::MapAsync instead");
+
WGPUBufferMapAsyncStatus status;
if (GetDevice()->ConsumedError(ValidateMap(wgpu::BufferUsage::MapWrite, &status))) {
callback(status, nullptr, 0, userdata);
@@ -295,22 +318,88 @@ namespace dawn_native {
mMapSerial++;
mMapWriteCallback = callback;
mMapUserdata = userdata;
+ mMapOffset = 0;
+ mMapSize = mSize;
mState = BufferState::Mapped;
- if (GetDevice()->ConsumedError(MapWriteAsyncImpl(mMapSerial))) {
+ if (GetDevice()->ConsumedError(MapWriteAsyncImpl())) {
CallMapWriteCallback(mMapSerial, WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0);
return;
}
MapRequestTracker* tracker = GetDevice()->GetMapRequestTracker();
- tracker->Track(this, mMapSerial, true);
+ tracker->Track(this, mMapSerial, MapType::Write);
+ }
+
+ void BufferBase::MapAsync(wgpu::MapMode mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata) {
+ // Handle the defaulting of size required by WebGPU, even if in webgpu_cpp.h it is not
+ // possible to default the function argument (because there is the callback later in the
+ // argument list)
+ if (size == 0 && offset < mSize) {
+ size = mSize - offset;
+ }
+
+ WGPUBufferMapAsyncStatus status;
+ if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status))) {
+ if (callback) {
+ callback(status, userdata);
+ }
+ return;
+ }
+ ASSERT(!IsError());
+
+ // TODO(cwallez@chromium.org): what to do on wraparound? Could cause crashes.
+ mMapSerial++;
+ mMapMode = mode;
+ mMapOffset = offset;
+ mMapSize = size;
+ mMapCallback = callback;
+ mMapUserdata = userdata;
+ mState = BufferState::Mapped;
+
+ if (GetDevice()->ConsumedError(MapAsyncImpl(mode, offset, size))) {
+ CallMapCallback(mMapSerial, WGPUBufferMapAsyncStatus_DeviceLost);
+ return;
+ }
+
+ MapRequestTracker* tracker = GetDevice()->GetMapRequestTracker();
+ tracker->Track(this, mMapSerial, MapType::Async);
+ }
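+    // Typical client-side usage (sketch; the callback name is illustrative):
+    //   buffer.MapAsync(wgpu::MapMode::Read, 0, 0, OnBufferMapped, nullptr);
+    // Passing size == 0 maps "the rest of the buffer", per the defaulting above.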
+
+ void* BufferBase::GetMappedRange(size_t offset, size_t size) {
+ return GetMappedRangeInternal(true, offset, size);
+ }
+
+ const void* BufferBase::GetConstMappedRange(size_t offset, size_t size) {
+ return GetMappedRangeInternal(false, offset, size);
+ }
+
+ // TODO(dawn:445): When CreateBufferMapped is removed, make GetMappedRangeInternal also take
+ // care of the validation of GetMappedRange.
+ void* BufferBase::GetMappedRangeInternal(bool writable, size_t offset, size_t size) {
+ if (!CanGetMappedRange(writable, offset, size)) {
+ return nullptr;
+ }
+
+ if (mStagingBuffer != nullptr) {
+ return static_cast<uint8_t*>(mStagingBuffer->GetMappedPointer()) + offset;
+ }
+ if (mSize == 0) {
+ return reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
+ }
+ return static_cast<uint8_t*>(GetMappedPointerImpl()) + offset;
}
void BufferBase::Destroy() {
if (IsError()) {
// It is an error to call Destroy() on an ErrorBuffer, but we still need to reclaim the
// fake mapped staging data.
- reinterpret_cast<ErrorBuffer*>(this)->ClearMappedData();
+ static_cast<ErrorBuffer*>(this)->ClearMappedData();
+ mState = BufferState::Destroyed;
}
if (GetDevice()->ConsumedError(ValidateDestroy())) {
return;
@@ -322,8 +411,9 @@ namespace dawn_native {
} else if (mState == BufferState::MappedAtCreation) {
if (mStagingBuffer != nullptr) {
mStagingBuffer.reset();
- } else {
- ASSERT(mSize == 0);
+ } else if (mSize != 0) {
+ ASSERT(IsMappableAtCreation());
+ Unmap();
}
}
@@ -348,7 +438,8 @@ namespace dawn_native {
if (IsError()) {
// It is an error to call Unmap() on an ErrorBuffer, but we still need to reclaim the
// fake mapped staging data.
- reinterpret_cast<ErrorBuffer*>(this)->ClearMappedData();
+ static_cast<ErrorBuffer*>(this)->ClearMappedData();
+ mState = BufferState::Unmapped;
}
if (GetDevice()->ConsumedError(ValidateUnmap())) {
return;
@@ -362,17 +453,20 @@ namespace dawn_native {
// CreateBufferMapped.
CallMapReadCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
CallMapWriteCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
+ CallMapCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown);
UnmapImpl();
mMapReadCallback = nullptr;
mMapWriteCallback = nullptr;
+ mMapCallback = nullptr;
mMapUserdata = 0;
} else if (mState == BufferState::MappedAtCreation) {
if (mStagingBuffer != nullptr) {
GetDevice()->ConsumedError(CopyFromStagingBuffer());
- } else {
- ASSERT(mSize == 0);
+ } else if (mSize != 0) {
+ ASSERT(IsMappableAtCreation());
+ UnmapImpl();
}
}
@@ -390,7 +484,7 @@ namespace dawn_native {
switch (mState) {
case BufferState::Mapped:
case BufferState::MappedAtCreation:
- return DAWN_VALIDATION_ERROR("Buffer already mapped");
+ return DAWN_VALIDATION_ERROR("Buffer is already mapped");
case BufferState::Destroyed:
return DAWN_VALIDATION_ERROR("Buffer is destroyed");
case BufferState::Unmapped:
@@ -405,6 +499,99 @@ namespace dawn_native {
return {};
}
+ MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapAsyncStatus* status) const {
+ *status = WGPUBufferMapAsyncStatus_DeviceLost;
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+
+ *status = WGPUBufferMapAsyncStatus_Error;
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+
+ if (offset % 4 != 0) {
+ return DAWN_VALIDATION_ERROR("offset must be a multiple of 4");
+ }
+
+ if (size % 4 != 0) {
+ return DAWN_VALIDATION_ERROR("size must be a multiple of 4");
+ }
+
+ if (uint64_t(offset) > mSize || uint64_t(size) > mSize - uint64_t(offset)) {
+ return DAWN_VALIDATION_ERROR("size + offset must fit in the buffer");
+ }
+
+ switch (mState) {
+ case BufferState::Mapped:
+ case BufferState::MappedAtCreation:
+ return DAWN_VALIDATION_ERROR("Buffer is already mapped");
+ case BufferState::Destroyed:
+ return DAWN_VALIDATION_ERROR("Buffer is destroyed");
+ case BufferState::Unmapped:
+ break;
+ }
+
+ bool isReadMode = mode & wgpu::MapMode::Read;
+ bool isWriteMode = mode & wgpu::MapMode::Write;
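+        // Exactly one of Read and Write must be set; both (Read | Write) and
+        // neither are rejected by the check below.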
+ if (!(isReadMode ^ isWriteMode)) {
+ return DAWN_VALIDATION_ERROR("Exactly one of Read or Write mode must be set");
+ }
+
+ if (mode & wgpu::MapMode::Read) {
+ if (!(mUsage & wgpu::BufferUsage::MapRead)) {
+ return DAWN_VALIDATION_ERROR("The buffer must have the MapRead usage");
+ }
+ } else {
+ ASSERT(mode & wgpu::MapMode::Write);
+
+ if (!(mUsage & wgpu::BufferUsage::MapWrite)) {
+ return DAWN_VALIDATION_ERROR("The buffer must have the MapWrite usage");
+ }
+ }
+
+ *status = WGPUBufferMapAsyncStatus_Success;
+ return {};
+ }
+
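// Illustrative sketch (not part of the patch): the MapAsync checks above, mirrored as a
// standalone helper. The function name is hypothetical; the real validation runs inside
// Dawn, where the buffer's state and usage flags are also available.
bool IsPlausibleMapAsyncRequest(wgpu::MapMode mode, size_t offset, size_t size,
                                uint64_t bufferSize) {
    bool aligned = (offset % 4 == 0) && (size % 4 == 0);                  // 4-byte multiples
    bool inBounds = offset <= bufferSize && size <= bufferSize - offset;  // overflow-safe
    bool read = mode & wgpu::MapMode::Read;
    bool write = mode & wgpu::MapMode::Write;
    return aligned && inBounds && (read != write);  // exactly one of Read / Write
}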
+ bool BufferBase::CanGetMappedRange(bool writable, size_t offset, size_t size) const {
+ if (size > mMapSize || offset < mMapOffset) {
+ return false;
+ }
+
+ size_t offsetInMappedRange = offset - mMapOffset;
+ if (offsetInMappedRange > mMapSize - size) {
+ return false;
+ }
+
+ // Note that:
+ //
+ // - We don't check that the device is alive because the application can ask for the
+ // mapped pointer before it, or even Dawn, knows that the device was lost, and the
+ // pointer still needs to work properly.
+ // - We don't check that the object is alive because we need to return mapped pointers
+ // for error buffers too.
+
+ switch (mState) {
+ // Writable Buffer::GetMappedRange is always allowed when mapped at creation.
+ case BufferState::MappedAtCreation:
+ return true;
+
+ case BufferState::Mapped:
+ // TODO(dawn:445): When mapRead/WriteAsync is removed, check against mMapMode
+ // instead of mUsage
+ ASSERT(bool(mUsage & wgpu::BufferUsage::MapRead) ^
+ bool(mUsage & wgpu::BufferUsage::MapWrite));
+ return !writable || (mUsage & wgpu::BufferUsage::MapWrite);
+
+ case BufferState::Unmapped:
+ case BufferState::Destroyed:
+ return false;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
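// For example, with mMapOffset = 8 and mMapSize = 16 (bytes 8..23 mapped), a request for
// offset = 12, size = 8 passes the checks above (offsetInMappedRange = 4, and 4 <= 16 - 8),
// while offset = 4, size = 4 fails immediately because it starts before mMapOffset.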
MaybeError BufferBase::ValidateUnmap() const {
DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
@@ -439,17 +626,31 @@ namespace dawn_native {
mState = BufferState::Destroyed;
}
- bool BufferBase::IsMapped() const {
- return mState == BufferState::Mapped;
+ void BufferBase::OnMapCommandSerialFinished(uint32_t mapSerial, MapType type) {
+ switch (type) {
+ case MapType::Read:
+ CallMapReadCallback(mapSerial, WGPUBufferMapAsyncStatus_Success,
+ GetMappedRangeInternal(false, 0, mSize), GetSize());
+ break;
+ case MapType::Write:
+ CallMapWriteCallback(mapSerial, WGPUBufferMapAsyncStatus_Success,
+ GetMappedRangeInternal(true, 0, mSize), GetSize());
+ break;
+ case MapType::Async:
+ CallMapCallback(mapSerial, WGPUBufferMapAsyncStatus_Success);
+ break;
+ }
+ }
+
+ bool BufferBase::IsDataInitialized() const {
+ return mIsDataInitialized;
}
- void BufferBase::OnMapCommandSerialFinished(uint32_t mapSerial, bool isWrite) {
- void* data = GetMappedPointerImpl();
- if (isWrite) {
- CallMapWriteCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
- } else {
- CallMapReadCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
- }
+ void BufferBase::SetIsDataInitialized() {
+ mIsDataInitialized = true;
}
+ bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
+ return offset == 0 && size == GetSize();
+ }
} // namespace dawn_native
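// Illustrative sketch (not part of the patch): driving the new MapAsync/GetConstMappedRange
// entry points from the wgpu C++ API. The helper and the MapContext type are hypothetical,
// the header path may differ depending on how Dawn is consumed, and the buffer is assumed
// to have been created with wgpu::BufferUsage::MapRead.
#include <cstddef>
#include <dawn/webgpu_cpp.h>

struct MapContext {
    wgpu::Buffer buffer;  // keeps the buffer alive until the callback fires
    size_t size;
};

void ReadBackBuffer(wgpu::Buffer buffer, size_t size) {
    MapContext* ctx = new MapContext{std::move(buffer), size};
    ctx->buffer.MapAsync(
        wgpu::MapMode::Read, 0, size,
        [](WGPUBufferMapAsyncStatus status, void* userdata) {
            MapContext* ctx = static_cast<MapContext*>(userdata);
            if (status == WGPUBufferMapAsyncStatus_Success) {
                const void* data = ctx->buffer.GetConstMappedRange(0, ctx->size);
                // ... copy the data out; the pointer is invalid after Unmap() ...
                ctx->buffer.Unmap();
            }
            delete ctx;
        },
        ctx);
}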
diff --git a/chromium/third_party/dawn/src/dawn_native/Buffer.h b/chromium/third_party/dawn/src/dawn_native/Buffer.h
index 8a1feb755ef..1dd5278ce72 100644
--- a/chromium/third_party/dawn/src/dawn_native/Buffer.h
+++ b/chromium/third_party/dawn/src/dawn_native/Buffer.h
@@ -25,6 +25,10 @@
namespace dawn_native {
+ struct CopyTextureToBufferCmd;
+
+ enum class MapType : uint32_t;
+
MaybeError ValidateBufferDescriptor(DeviceBase* device, const BufferDescriptor* descriptor);
static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
@@ -40,74 +44,92 @@ namespace dawn_native {
};
public:
- enum class ClearValue { Zero, NonZero };
-
BufferBase(DeviceBase* device, const BufferDescriptor* descriptor);
- static BufferBase* MakeError(DeviceBase* device);
- static BufferBase* MakeErrorMapped(DeviceBase* device,
- uint64_t size,
- uint8_t** mappedPointer);
+ static BufferBase* MakeError(DeviceBase* device, const BufferDescriptor* descriptor);
uint64_t GetSize() const;
wgpu::BufferUsage GetUsage() const;
- MaybeError MapAtCreation(uint8_t** mappedPointer);
- void OnMapCommandSerialFinished(uint32_t mapSerial, bool isWrite);
+ MaybeError MapAtCreation();
+ void OnMapCommandSerialFinished(uint32_t mapSerial, MapType type);
MaybeError ValidateCanUseOnQueueNow() const;
+ bool IsFullBufferRange(uint64_t offset, uint64_t size) const;
+ bool IsDataInitialized() const;
+ void SetIsDataInitialized();
+
// Dawn API
- void SetSubData(uint32_t start, uint32_t count, const void* data);
void MapReadAsync(WGPUBufferMapReadCallback callback, void* userdata);
void MapWriteAsync(WGPUBufferMapWriteCallback callback, void* userdata);
+ void MapAsync(wgpu::MapMode mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata);
+ void* GetMappedRange(size_t offset, size_t size);
+ const void* GetConstMappedRange(size_t offset, size_t size);
void Unmap();
void Destroy();
protected:
- BufferBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+ BufferBase(DeviceBase* device,
+ const BufferDescriptor* descriptor,
+ ObjectBase::ErrorTag tag);
~BufferBase() override;
- void CallMapReadCallback(uint32_t serial,
- WGPUBufferMapAsyncStatus status,
- const void* pointer,
- uint64_t dataLength);
- void CallMapWriteCallback(uint32_t serial,
- WGPUBufferMapAsyncStatus status,
- void* pointer,
- uint64_t dataLength);
-
void DestroyInternal();
bool IsMapped() const;
private:
- virtual MaybeError MapAtCreationImpl(uint8_t** mappedPointer) = 0;
- virtual MaybeError MapReadAsyncImpl(uint32_t serial) = 0;
- virtual MaybeError MapWriteAsyncImpl(uint32_t serial) = 0;
+ virtual MaybeError MapAtCreationImpl() = 0;
+ virtual MaybeError MapReadAsyncImpl() = 0;
+ virtual MaybeError MapWriteAsyncImpl() = 0;
+ virtual MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) = 0;
virtual void UnmapImpl() = 0;
virtual void DestroyImpl() = 0;
virtual void* GetMappedPointerImpl() = 0;
- virtual bool IsMapWritable() const = 0;
+ virtual bool IsMappableAtCreation() const = 0;
MaybeError CopyFromStagingBuffer();
+ void* GetMappedRangeInternal(bool writable, size_t offset, size_t size);
+ void CallMapReadCallback(uint32_t serial,
+ WGPUBufferMapAsyncStatus status,
+ const void* pointer,
+ uint64_t dataLength);
+ void CallMapWriteCallback(uint32_t serial,
+ WGPUBufferMapAsyncStatus status,
+ void* pointer,
+ uint64_t dataLength);
+ void CallMapCallback(uint32_t serial, WGPUBufferMapAsyncStatus status);
MaybeError ValidateMap(wgpu::BufferUsage requiredUsage,
WGPUBufferMapAsyncStatus* status) const;
+ MaybeError ValidateMapAsync(wgpu::MapMode mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapAsyncStatus* status) const;
MaybeError ValidateUnmap() const;
MaybeError ValidateDestroy() const;
+ bool CanGetMappedRange(bool writable, size_t offset, size_t size) const;
uint64_t mSize = 0;
wgpu::BufferUsage mUsage = wgpu::BufferUsage::None;
+ BufferState mState;
+ bool mIsDataInitialized = false;
+
+ std::unique_ptr<StagingBufferBase> mStagingBuffer;
WGPUBufferMapReadCallback mMapReadCallback = nullptr;
WGPUBufferMapWriteCallback mMapWriteCallback = nullptr;
+ WGPUBufferMapCallback mMapCallback = nullptr;
void* mMapUserdata = 0;
uint32_t mMapSerial = 0;
-
- std::unique_ptr<StagingBufferBase> mStagingBuffer;
-
- BufferState mState;
+ wgpu::MapMode mMapMode = wgpu::MapMode::None;
+ size_t mMapOffset = 0;
+ size_t mMapSize = 0;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
index cabbc1a6c31..afac9c5cd97 100644
--- a/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn_native/CMakeLists.txt
@@ -35,6 +35,7 @@ target_sources(dawn_native PRIVATE
"BindGroupLayout.cpp"
"BindGroupLayout.h"
"BindGroupTracker.h"
+ "BindingInfo.cpp"
"BindingInfo.h"
"BuddyAllocator.cpp"
"BuddyAllocator.h"
@@ -66,6 +67,8 @@ target_sources(dawn_native PRIVATE
"DynamicUploader.h"
"EncodingContext.cpp"
"EncodingContext.h"
+ "EnumClassBitmasks.h"
+ "EnumMaskIterator.h"
"Error.cpp"
"Error.h"
"ErrorData.cpp"
@@ -100,6 +103,8 @@ target_sources(dawn_native PRIVATE
"Pipeline.h"
"PipelineLayout.cpp"
"PipelineLayout.h"
+ "PooledResourceMemoryAllocator.cpp"
+ "PooledResourceMemoryAllocator.h"
"ProgrammablePassEncoder.cpp"
"ProgrammablePassEncoder.h"
"QuerySet.cpp"
@@ -201,6 +206,8 @@ if (DAWN_ENABLE_D3D12)
"d3d12/PipelineLayoutD3D12.h"
"d3d12/PlatformFunctions.cpp"
"d3d12/PlatformFunctions.h"
+ "d3d12/QuerySetD3D12.cpp"
+ "d3d12/QuerySetD3D12.h"
"d3d12/QueueD3D12.cpp"
"d3d12/QueueD3D12.h"
"d3d12/RenderPassBuilderD3D12.cpp"
@@ -283,6 +290,7 @@ if (DAWN_ENABLE_METAL)
"-framework IOKit"
"-framework IOSurface"
"-framework QuartzCore"
+ "-framework Metal"
)
endif()
@@ -388,6 +396,8 @@ if (DAWN_ENABLE_VULKAN)
"vulkan/NativeSwapChainImplVk.h"
"vulkan/PipelineLayoutVk.cpp"
"vulkan/PipelineLayoutVk.h"
+ "vulkan/QuerySetVk.cpp"
+ "vulkan/QuerySetVk.h"
"vulkan/QueueVk.cpp"
"vulkan/QueueVk.h"
"vulkan/RenderPassCache.cpp"
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp
index 553f8896577..7c94520e076 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandAllocator.cpp
@@ -30,13 +30,7 @@ namespace dawn_native {
}
CommandIterator::~CommandIterator() {
- ASSERT(mDataWasDestroyed);
-
- if (!IsEmpty()) {
- for (auto& block : mBlocks) {
- free(block.block);
- }
- }
+ ASSERT(IsEmpty());
}
CommandIterator::CommandIterator(CommandIterator&& other) {
@@ -44,18 +38,13 @@ namespace dawn_native {
mBlocks = std::move(other.mBlocks);
other.Reset();
}
- other.DataWasDestroyed();
Reset();
}
CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
- if (!other.IsEmpty()) {
- mBlocks = std::move(other.mBlocks);
- other.Reset();
- } else {
- mBlocks.clear();
- }
- other.DataWasDestroyed();
+ ASSERT(IsEmpty());
+ mBlocks = std::move(other.mBlocks);
+ other.Reset();
Reset();
return *this;
}
@@ -66,6 +55,7 @@ namespace dawn_native {
}
CommandIterator& CommandIterator::operator=(CommandAllocator&& allocator) {
+ ASSERT(IsEmpty());
mBlocks = allocator.AcquireBlocks();
Reset();
return *this;
@@ -97,8 +87,17 @@ namespace dawn_native {
}
}
- void CommandIterator::DataWasDestroyed() {
- mDataWasDestroyed = true;
+ void CommandIterator::MakeEmptyAsDataWasDestroyed() {
+ if (IsEmpty()) {
+ return;
+ }
+
+ for (auto& block : mBlocks) {
+ free(block.block);
+ }
+ mBlocks.clear();
+ Reset();
+ ASSERT(IsEmpty());
}
bool CommandIterator::IsEmpty() const {
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h b/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h
index 82de05c1a45..0383dc1639a 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandAllocator.h
@@ -91,10 +91,13 @@ namespace dawn_native {
return static_cast<T*>(NextData(sizeof(T) * count, alignof(T)));
}
- // Needs to be called if iteration was stopped early.
+ // Sets the iterator back to the beginning of the commands without emptying the list.
+ // This method can be used if iteration was stopped early and the iterator needs to be
+ // restarted.
void Reset();
- void DataWasDestroyed();
+ // This method must be called after the commands have been deleted. It indicates that
+ // the commands have been submitted and are no longer valid.
+ void MakeEmptyAsDataWasDestroyed();
private:
bool IsEmpty() const;
@@ -139,7 +142,6 @@ namespace dawn_native {
size_t mCurrentBlock = 0;
// Used to avoid a special case for empty iterators.
uint32_t mEndOfBlock = detail::kEndOfBlock;
- bool mDataWasDestroyed = false;
};
class CommandAllocator {
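// Illustrative sketch (not part of the patch): under the new contract, a CommandIterator
// must be drained and explicitly emptied before it is reassigned or destroyed, e.g. in a
// backend's submit path (FreeCommands() calls MakeEmptyAsDataWasDestroyed() internally):
//
//     CommandIterator commands = encoder->AcquireCommands();
//     Command type;
//     while (commands.NextCommandId(&type)) {
//         // ... dispatch on type, reading each payload with NextCommand<T>() ...
//     }
//     FreeCommands(&commands);  // destructs the payloads and empties the iterator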
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
index 401451d4911..91fec29f180 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/CommandBuffer.h"
#include "common/BitSetIterator.h"
+#include "dawn_native/Buffer.h"
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/Commands.h"
#include "dawn_native/Format.h"
@@ -23,18 +24,39 @@
namespace dawn_native {
CommandBufferBase::CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor*)
- : ObjectBase(encoder->GetDevice()), mResourceUsages(encoder->AcquireResourceUsages()) {
+ : ObjectBase(encoder->GetDevice()),
+ mCommands(encoder->AcquireCommands()),
+ mResourceUsages(encoder->AcquireResourceUsages()) {
}
CommandBufferBase::CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
: ObjectBase(device, tag) {
}
+ CommandBufferBase::~CommandBufferBase() {
+ Destroy();
+ }
+
// static
CommandBufferBase* CommandBufferBase::MakeError(DeviceBase* device) {
return new CommandBufferBase(device, ObjectBase::kError);
}
+ MaybeError CommandBufferBase::ValidateCanUseInSubmitNow() const {
+ ASSERT(!IsError());
+
+ if (mDestroyed) {
+ return DAWN_VALIDATION_ERROR("Command buffer reused in submit");
+ }
+ return {};
+ }
+
+ void CommandBufferBase::Destroy() {
+ FreeCommands(&mCommands);
+ mResourceUsages = {};
+ mDestroyed = true;
+ }
+
const CommandBufferResourceUsage& CommandBufferBase::GetResourceUsages() const {
return mResourceUsages;
}
@@ -51,6 +73,17 @@ namespace dawn_native {
return false;
}
+ SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
+ const Extent3D& copySize) {
+ switch (copy.texture->GetDimension()) {
+ case wgpu::TextureDimension::e2D:
+ return {copy.mipLevel, 1, copy.origin.z, copySize.depth, copy.aspect};
+ default:
+ UNREACHABLE();
+ return {};
+ }
+ }
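// For a 2D texture, the range above covers a single mip level and copySize.depth array
// layers starting at origin.z: a copy with origin.z = 2 and copySize.depth = 3 touches
// layers 2, 3 and 4 of the selected mip.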
+
void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass) {
for (uint32_t i : IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
auto& attachmentInfo = renderPass->colorAttachments[i];
@@ -101,37 +134,75 @@ namespace dawn_native {
ASSERT(view->GetLevelCount() == 1);
SubresourceRange range = view->GetSubresourceRange();
+ SubresourceRange depthRange = range;
+ depthRange.aspects = range.aspects & Aspect::Depth;
+
+ SubresourceRange stencilRange = range;
+ stencilRange.aspects = range.aspects & Aspect::Stencil;
+
// If the depth stencil texture has not been initialized, we want to use LoadOp::Clear
// to initialize its contents to 0
- if (!view->GetTexture()->IsSubresourceContentInitialized(range)) {
- if (view->GetTexture()->GetFormat().HasDepth() &&
- attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
- attachmentInfo.clearDepth = 0.0f;
- attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
- }
- if (view->GetTexture()->GetFormat().HasStencil() &&
- attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
- attachmentInfo.clearStencil = 0u;
- attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
- }
+ if (!view->GetTexture()->IsSubresourceContentInitialized(depthRange) &&
+ attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
+ attachmentInfo.clearDepth = 0.0f;
+ attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
}
- // If these have different store ops, make them both Store because we can't track
- // initialized state separately yet. TODO(crbug.com/dawn/145)
- if (attachmentInfo.depthStoreOp != attachmentInfo.stencilStoreOp) {
- attachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
- attachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+ if (!view->GetTexture()->IsSubresourceContentInitialized(stencilRange) &&
+ attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
+ attachmentInfo.clearStencil = 0u;
+ attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
}
- if (attachmentInfo.depthStoreOp == wgpu::StoreOp::Store &&
- attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store) {
- view->GetTexture()->SetIsSubresourceContentInitialized(true, range);
- } else {
- ASSERT(attachmentInfo.depthStoreOp == wgpu::StoreOp::Clear &&
- attachmentInfo.stencilStoreOp == wgpu::StoreOp::Clear);
- view->GetTexture()->SetIsSubresourceContentInitialized(false, range);
- }
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ attachmentInfo.depthStoreOp == wgpu::StoreOp::Store, depthRange);
+
+ view->GetTexture()->SetIsSubresourceContentInitialized(
+ attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store, stencilRange);
}
}
+ // TODO(jiawei.shao@intel.com): support copying with depth stencil textures
+ bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy) {
+ ASSERT(copy != nullptr);
+
+ if (copy->destination.offset > 0) {
+ return false;
+ }
+
+ if (copy->destination.rowsPerImage > copy->copySize.height) {
+ return false;
+ }
+
+ const TextureBase* texture = copy->source.texture.Get();
+ const uint64_t copyTextureDataSizePerRow = copy->copySize.width /
+ texture->GetFormat().blockWidth *
+ texture->GetFormat().blockByteSize;
+ if (copy->destination.bytesPerRow > copyTextureDataSizePerRow) {
+ return false;
+ }
+
+ const uint64_t overwrittenRangeSize =
+ copyTextureDataSizePerRow * (copy->copySize.height / texture->GetFormat().blockHeight) *
+ copy->copySize.depth;
+ if (copy->destination.buffer->GetSize() > overwrittenRangeSize) {
+ return false;
+ }
+
+ return true;
+ }
+
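// Worked example for the check above: a texture-to-buffer copy of a 64x1 RGBA8 texture
// (1x1 blocks, 4 bytes per block) has copyTextureDataSizePerRow = 64 * 4 = 256. With
// offset = 0, bytesPerRow = 256, rowsPerImage = 1 and depth = 1, the overwritten range is
// 256 * 1 * 1 = 256 bytes, so a 256-byte destination buffer is fully overwritten while a
// 512-byte buffer is not.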
+ std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn_native::Color color) {
+ const std::array<int32_t, 4> outputValue = {
+ static_cast<int32_t>(color.r), static_cast<int32_t>(color.g),
+ static_cast<int32_t>(color.b), static_cast<int32_t>(color.a)};
+ return outputValue;
+ }
+
+ std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn_native::Color color) {
+ const std::array<uint32_t, 4> outputValue = {
+ static_cast<uint32_t>(color.r), static_cast<uint32_t>(color.g),
+ static_cast<uint32_t>(color.b), static_cast<uint32_t>(color.a)};
+ return outputValue;
+ }
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h
index c1d2597ff84..54a8f7649f4 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandBuffer.h
@@ -17,32 +17,55 @@
#include "dawn_native/dawn_platform.h"
+#include "dawn_native/CommandAllocator.h"
+#include "dawn_native/Error.h"
#include "dawn_native/Forward.h"
#include "dawn_native/ObjectBase.h"
#include "dawn_native/PassResourceUsage.h"
+#include "dawn_native/Texture.h"
namespace dawn_native {
struct BeginRenderPassCmd;
+ struct CopyTextureToBufferCmd;
+ struct TextureCopy;
class CommandBufferBase : public ObjectBase {
public:
CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+
static CommandBufferBase* MakeError(DeviceBase* device);
+ MaybeError ValidateCanUseInSubmitNow() const;
+ void Destroy();
+
const CommandBufferResourceUsage& GetResourceUsages() const;
+ protected:
+ ~CommandBufferBase();
+
+ CommandIterator mCommands;
+
private:
CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag);
CommandBufferResourceUsage mResourceUsages;
+ bool mDestroyed = false;
};
+
bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
const Extent3D copySize,
const uint32_t mipLevel);
+ SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
+ const Extent3D& copySize);
void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass);
+ bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy);
+
+ std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn_native::Color color);
+ std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn_native::Color color);
+
} // namespace dawn_native
#endif // DAWNNATIVE_COMMANDBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
index 5b8bc640f83..db4968c3020 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/CommandEncoder.h"
#include "common/BitSetIterator.h"
+#include "common/Math.h"
#include "dawn_native/BindGroup.h"
#include "dawn_native/Buffer.h"
#include "dawn_native/CommandBuffer.h"
@@ -24,6 +25,7 @@
#include "dawn_native/ComputePassEncoder.h"
#include "dawn_native/Device.h"
#include "dawn_native/ErrorData.h"
+#include "dawn_native/QuerySet.h"
#include "dawn_native/RenderPassEncoder.h"
#include "dawn_native/RenderPipeline.h"
#include "dawn_native/ValidationUtils_autogen.h"
@@ -37,55 +39,6 @@ namespace dawn_native {
namespace {
- // TODO(jiawei.shao@intel.com): add validations on the texture-to-texture copies within the
- // same texture.
- MaybeError ValidateCopySizeFitsInTexture(const TextureCopyView& textureCopy,
- const Extent3D& copySize) {
- const TextureBase* texture = textureCopy.texture;
- if (textureCopy.mipLevel >= texture->GetNumMipLevels()) {
- return DAWN_VALIDATION_ERROR("Copy mipLevel out of range");
- }
-
- Extent3D mipSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
- // For 2D textures, include the array layer as depth so it can be checked with other
- // dimensions.
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- mipSize.depth = texture->GetArrayLayers();
-
- // All texture dimensions are in uint32_t so by doing checks in uint64_t we avoid
- // overflows.
- if (static_cast<uint64_t>(textureCopy.origin.x) +
- static_cast<uint64_t>(copySize.width) >
- static_cast<uint64_t>(mipSize.width) ||
- static_cast<uint64_t>(textureCopy.origin.y) +
- static_cast<uint64_t>(copySize.height) >
- static_cast<uint64_t>(mipSize.height) ||
- static_cast<uint64_t>(textureCopy.origin.z) +
- static_cast<uint64_t>(copySize.depth) >
- static_cast<uint64_t>(mipSize.depth)) {
- return DAWN_VALIDATION_ERROR("Copy would touch outside of the texture");
- }
-
- return {};
- }
-
- MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
- uint64_t offset,
- uint64_t size) {
- uint64_t bufferSize = buffer->GetSize();
- bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
- if (!fitsInBuffer) {
- return DAWN_VALIDATION_ERROR("Copy would overflow the buffer");
- }
-
- return {};
- }
-
- MaybeError ValidateCopySizeFitsInBuffer(const BufferCopyView& bufferCopy,
- uint64_t dataSize) {
- return ValidateCopySizeFitsInBuffer(bufferCopy.buffer, bufferCopy.offset, dataSize);
- }
-
MaybeError ValidateB2BCopyAlignment(uint64_t dataSize,
uint64_t srcOffset,
uint64_t dstOffset) {
@@ -103,31 +56,6 @@ namespace dawn_native {
return {};
}
- MaybeError ValidateTexelBufferOffset(const BufferCopyView& bufferCopy,
- const Format& format) {
- if (bufferCopy.offset % format.blockByteSize != 0) {
- return DAWN_VALIDATION_ERROR(
- "Buffer offset must be a multiple of the texel or block size");
- }
-
- return {};
- }
-
- MaybeError ValidateRowsPerImage(const Format& format,
- uint32_t rowsPerImage,
- uint32_t copyHeight) {
- if (rowsPerImage < copyHeight) {
- return DAWN_VALIDATION_ERROR("rowsPerImage must not be less than the copy height.");
- }
-
- if (rowsPerImage % format.blockHeight != 0) {
- return DAWN_VALIDATION_ERROR(
- "rowsPerImage must be a multiple of compressed texture format block height");
- }
-
- return {};
- }
-
MaybeError ValidateTextureSampleCountInCopyCommands(const TextureBase* texture) {
if (texture->GetSampleCount() > 1) {
return DAWN_VALIDATION_ERROR("The sample count of textures must be 1");
@@ -173,6 +101,12 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Source and destination texture formats must match.");
}
+ if (src.aspect != wgpu::TextureAspect::All || dst.aspect != wgpu::TextureAspect::All) {
+ // Metal cannot select a single aspect for texture-to-texture copies
+ return DAWN_VALIDATION_ERROR(
+ "Texture aspect must be \"all\" for texture to texture copies");
+ }
+
if (src.texture->GetFormat().HasDepthOrStencil()) {
// D3D12 requires entire subresource to be copied when using CopyTextureRegion is
// used with depth/stencil.
@@ -192,72 +126,50 @@ namespace dawn_native {
return {};
}
- MaybeError ComputeTextureCopyBufferSize(const Format& textureFormat,
- const Extent3D& copySize,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage,
- uint32_t* bufferSize) {
- ASSERT(rowsPerImage >= copySize.height);
- if (copySize.width == 0 || copySize.height == 0 || copySize.depth == 0) {
- *bufferSize = 0;
- return {};
- }
-
- uint32_t blockByteSize = textureFormat.blockByteSize;
- uint32_t blockWidth = textureFormat.blockWidth;
- uint32_t blockHeight = textureFormat.blockHeight;
-
- // TODO(cwallez@chromium.org): check for overflows
- uint32_t slicePitch = bytesPerRow * rowsPerImage / blockWidth;
-
- ASSERT(copySize.height >= 1);
- uint32_t sliceSize = bytesPerRow * (copySize.height / blockHeight - 1) +
- (copySize.width / blockWidth) * blockByteSize;
-
- ASSERT(copySize.depth >= 1);
- *bufferSize = (slicePitch * (copySize.depth - 1)) + sliceSize;
-
- return {};
- }
-
- MaybeError ValidateBytesPerRow(const Format& format,
- const Extent3D& copySize,
- uint32_t bytesPerRow) {
- if (bytesPerRow % kTextureBytesPerRowAlignment != 0) {
- return DAWN_VALIDATION_ERROR("bytesPerRow must be a multiple of 256");
- }
-
- if (bytesPerRow < copySize.width / format.blockWidth * format.blockByteSize) {
- return DAWN_VALIDATION_ERROR(
- "bytesPerRow must not be less than the number of bytes per row");
- }
-
- return {};
- }
-
- MaybeError ValidateImageOrigin(const Format& format, const Origin3D& offset) {
- if (offset.x % format.blockWidth != 0) {
- return DAWN_VALIDATION_ERROR(
- "Offset.x must be a multiple of compressed texture format block width");
- }
-
- if (offset.y % format.blockHeight != 0) {
- return DAWN_VALIDATION_ERROR(
- "Offset.y must be a multiple of compressed texture format block height");
- }
-
- return {};
- }
-
- MaybeError ValidateImageCopySize(const Format& format, const Extent3D& extent) {
- if (extent.width % format.blockWidth != 0) {
- return DAWN_VALIDATION_ERROR(
- "Extent.width must be a multiple of compressed texture format block width");
- }
-
- if (extent.height % format.blockHeight != 0) {
- return DAWN_VALIDATION_ERROR(
- "Extent.height must be a multiple of compressed texture format block height");
+ MaybeError ValidateTextureToBufferCopyRestrictions(const TextureCopyView& src) {
+ const Format& format = src.texture->GetFormat();
+
+ bool depthSelected = false;
+ switch (src.aspect) {
+ case wgpu::TextureAspect::All:
+ switch (format.aspects) {
+ case Aspect::Color:
+ case Aspect::Stencil:
+ break;
+ case Aspect::Depth:
+ depthSelected = true;
+ break;
+ default:
+ return DAWN_VALIDATION_ERROR(
+ "A single aspect must be selected for multi planar formats in "
+ "texture to buffer copies");
+ }
+ break;
+ case wgpu::TextureAspect::DepthOnly:
+ ASSERT(format.aspects & Aspect::Depth);
+ depthSelected = true;
+ break;
+ case wgpu::TextureAspect::StencilOnly:
+ ASSERT(format.aspects & Aspect::Stencil);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (depthSelected) {
+ switch (format.format) {
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ return DAWN_VALIDATION_ERROR(
+ "The depth aspect of depth24plus texture cannot be selected in a "
+ "texture to buffer copy");
+ break;
+ case wgpu::TextureFormat::Depth32Float:
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
}
return {};
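// In summary, for texture-to-buffer copies the aspect rules above allow color and stencil
// data, and allow depth data only for Depth32Float; the depth aspect of Depth24Plus and
// Depth24PlusStencil8 is rejected because its in-memory representation is
// implementation-defined. Multi-planar formats must select a single aspect explicitly.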
@@ -442,15 +354,34 @@ namespace dawn_native {
DAWN_TRY(ValidateStoreOp(depthStencilAttachment->depthStoreOp));
DAWN_TRY(ValidateStoreOp(depthStencilAttachment->stencilStoreOp));
- if (depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
- std::isnan(depthStencilAttachment->clearDepth)) {
- return DAWN_VALIDATION_ERROR("Depth clear value cannot be NaN");
+ if (attachment->GetAspect() == wgpu::TextureAspect::All &&
+ attachment->GetFormat().HasStencil() &&
+ depthStencilAttachment->depthReadOnly != depthStencilAttachment->stencilReadOnly) {
+ return DAWN_VALIDATION_ERROR(
+ "depthReadOnly and stencilReadOnly must be the same when texture aspect is "
+ "'all'");
}
- // This validates that the depth storeOp and stencil storeOps are the same
- if (depthStencilAttachment->depthStoreOp != depthStencilAttachment->stencilStoreOp) {
+ if (depthStencilAttachment->depthReadOnly &&
+ (depthStencilAttachment->depthLoadOp != wgpu::LoadOp::Load ||
+ depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Store)) {
return DAWN_VALIDATION_ERROR(
- "The depth storeOp and stencil storeOp are not the same");
+ "depthLoadOp must be load and depthStoreOp must be store when depthReadOnly "
+ "is true.");
+ }
+
+ if (depthStencilAttachment->stencilReadOnly &&
+ (depthStencilAttachment->stencilLoadOp != wgpu::LoadOp::Load ||
+ depthStencilAttachment->stencilStoreOp != wgpu::StoreOp::Store)) {
+ return DAWN_VALIDATION_ERROR(
+ "stencilLoadOp must be load and stencilStoreOp must be store when "
+ "stencilReadOnly "
+ "is true.");
+ }
+
+ if (depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
+ std::isnan(depthStencilAttachment->clearDepth)) {
+ return DAWN_VALIDATION_ERROR("Depth clear value cannot be NaN");
}
// *sampleCount == 0 must only happen when there is no color attachment. In that case we
@@ -489,6 +420,10 @@ namespace dawn_native {
device, descriptor->depthStencilAttachment, width, height, sampleCount));
}
+ if (descriptor->occlusionQuerySet != nullptr) {
+ return DAWN_VALIDATION_ERROR("occlusionQuerySet not implemented");
+ }
+
if (descriptor->colorAttachmentCount == 0 &&
descriptor->depthStencilAttachment == nullptr) {
return DAWN_VALIDATION_ERROR("Cannot use render pass with no attachments.");
@@ -502,23 +437,42 @@ namespace dawn_native {
return {};
}
- ResultOrError<TextureCopyView> FixTextureCopyView(DeviceBase* device,
- const TextureCopyView* view) {
- TextureCopyView fixedView = *view;
-
- if (view->arrayLayer != 0) {
- if (view->origin.z != 0) {
- return DAWN_VALIDATION_ERROR("arrayLayer and origin.z cannot both be != 0");
- } else {
- fixedView.origin.z = fixedView.arrayLayer;
- fixedView.arrayLayer = 1;
- device->EmitDeprecationWarning(
- "wgpu::TextureCopyView::arrayLayer is deprecated in favor of "
- "::origin::z");
- }
+ MaybeError ValidateQuerySetResolve(const QuerySetBase* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ const BufferBase* destination,
+ uint64_t destinationOffset) {
+ if (firstQuery >= querySet->GetQueryCount()) {
+ return DAWN_VALIDATION_ERROR("Query index out of bounds");
+ }
+
+ if (queryCount > querySet->GetQueryCount() - firstQuery) {
+ return DAWN_VALIDATION_ERROR(
+ "The sum of firstQuery and queryCount exceeds the number of queries in query "
+ "set");
+ }
+
+ // TODO(hao.x.li@intel.com): Validate that the queries between [firstQuery, firstQuery +
+ // queryCount - 1] are available (written by query operations).
+
+ // The destinationOffset must be a multiple of 8 bytes on D3D12 and Vulkan
+ if (destinationOffset % 8 != 0) {
+ return DAWN_VALIDATION_ERROR(
+ "The alignment offset into the destination buffer must be a multiple of 8 "
+ "bytes");
}
- return fixedView;
+ uint64_t bufferSize = destination->GetSize();
+ // The destination buffer must have enough storage, from the destination offset, to
+ // contain the results of the resolved queries
+ bool fitsInBuffer = destinationOffset <= bufferSize &&
+ (static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <=
+ (bufferSize - destinationOffset));
+ if (!fitsInBuffer) {
+ return DAWN_VALIDATION_ERROR("The resolved query data would overflow the buffer");
+ }
+
+ return {};
}
} // namespace
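// Worked example for the checks above: resolving queryCount = 4 queries writes
// 4 * sizeof(uint64_t) = 32 bytes. With destinationOffset = 8 (a multiple of 8), the
// destination buffer must be at least 8 + 32 = 40 bytes, and with firstQuery = 2 the
// query set needs GetQueryCount() >= 6.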
@@ -529,14 +483,18 @@ namespace dawn_native {
CommandBufferResourceUsage CommandEncoder::AcquireResourceUsages() {
return CommandBufferResourceUsage{mEncodingContext.AcquirePassUsages(),
- std::move(mTopLevelBuffers),
- std::move(mTopLevelTextures)};
+ std::move(mTopLevelBuffers), std::move(mTopLevelTextures),
+ std::move(mUsedQuerySets)};
}
CommandIterator CommandEncoder::AcquireCommands() {
return mEncodingContext.AcquireCommands();
}
+ void CommandEncoder::TrackUsedQuerySet(QuerySetBase* querySet) {
+ mUsedQuerySets.insert(querySet);
+ }
+
// Implementation of the API's command recording methods
ComputePassEncoder* CommandEncoder::BeginComputePass(const ComputePassDescriptor* descriptor) {
@@ -681,69 +639,56 @@ namespace dawn_native {
const TextureCopyView* destination,
const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- // TODO(dawn:22): Remove once migration from GPUTextureCopyView.arrayLayer to
- // GPUTextureCopyView.origin.z is done.
- TextureCopyView fixedDest;
- DAWN_TRY_ASSIGN(fixedDest, FixTextureCopyView(GetDevice(), destination));
- destination = &fixedDest;
-
- // Validate objects before doing the defaulting.
if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(source->buffer));
- DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
+ DAWN_TRY(ValidateBufferCopyView(GetDevice(), *source));
+ DAWN_TRY(ValidateCanUseAs(source->buffer, wgpu::BufferUsage::CopySrc));
+
+ DAWN_TRY(ValidateTextureCopyView(GetDevice(), *destination));
+ DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst));
+ DAWN_TRY(ValidateTextureSampleCountInCopyCommands(destination->texture));
+
+ // We validate the texture copy range before validating the linear texture data
+ // because the latter divides copyExtent.width by blockWidth and copyExtent.height
+ // by blockHeight, and those divisibility conditions are checked while validating
+ // the texture copy range.
+ DAWN_TRY(ValidateTextureCopyRange(*destination, *copySize));
+ DAWN_TRY(ValidateBufferToTextureCopyRestrictions(*destination));
+ DAWN_TRY(ValidateLinearTextureData(
+ source->layout, source->buffer->GetSize(),
+ destination->texture->GetFormat().GetTexelBlockInfo(destination->aspect),
+ *copySize));
+
+ mTopLevelBuffers.insert(source->buffer);
+ mTopLevelTextures.insert(destination->texture);
}
// Compute default value for rowsPerImage
- uint32_t defaultedRowsPerImage = source->rowsPerImage;
+ uint32_t defaultedRowsPerImage = source->layout.rowsPerImage;
if (defaultedRowsPerImage == 0) {
defaultedRowsPerImage = copySize->height;
}
- // Perform the rest of the validation using the default values.
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateTextureSampleCountInCopyCommands(destination->texture));
-
- DAWN_TRY(ValidateRowsPerImage(destination->texture->GetFormat(),
- defaultedRowsPerImage, copySize->height));
- DAWN_TRY(
- ValidateImageOrigin(destination->texture->GetFormat(), destination->origin));
- DAWN_TRY(ValidateImageCopySize(destination->texture->GetFormat(), *copySize));
-
- uint32_t bufferCopySize = 0;
- DAWN_TRY(ValidateBytesPerRow(destination->texture->GetFormat(), *copySize,
- source->bytesPerRow));
-
- DAWN_TRY(ComputeTextureCopyBufferSize(destination->texture->GetFormat(), *copySize,
- source->bytesPerRow, defaultedRowsPerImage,
- &bufferCopySize));
-
- DAWN_TRY(ValidateCopySizeFitsInTexture(*destination, *copySize));
- DAWN_TRY(ValidateCopySizeFitsInBuffer(*source, bufferCopySize));
- DAWN_TRY(ValidateTexelBufferOffset(*source, destination->texture->GetFormat()));
-
- DAWN_TRY(ValidateCanUseAs(source->buffer, wgpu::BufferUsage::CopySrc));
- DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst));
-
- mTopLevelBuffers.insert(source->buffer);
- mTopLevelTextures.insert(destination->texture);
+ // In the case of a one-row copy, bytesPerRow might not contain enough bytes
+ uint32_t bytesPerRow = source->layout.bytesPerRow;
+ if (copySize->height <= 1 && copySize->depth <= 1) {
+ bytesPerRow =
+ Align(copySize->width * destination->texture->GetFormat().blockByteSize,
+ kTextureBytesPerRowAlignment);
}
// Record the copy command.
CopyBufferToTextureCmd* copy =
allocator->Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture);
copy->source.buffer = source->buffer;
- copy->source.offset = source->offset;
- copy->source.bytesPerRow = source->bytesPerRow;
+ copy->source.offset = source->layout.offset;
+ copy->source.bytesPerRow = bytesPerRow;
copy->source.rowsPerImage = defaultedRowsPerImage;
copy->destination.texture = destination->texture;
copy->destination.origin = destination->origin;
- copy->copySize = *copySize;
copy->destination.mipLevel = destination->mipLevel;
- copy->destination.arrayLayer = destination->arrayLayer;
-
- // TODO(cwallez@chromium.org): Make backends use origin.z instead of arrayLayer
- copy->destination.arrayLayer = copy->destination.origin.z;
- copy->destination.origin.z = 0;
+ copy->destination.aspect =
+ ConvertAspect(destination->texture->GetFormat(), destination->aspect);
+ copy->copySize = *copySize;
return {};
});
@@ -753,49 +698,39 @@ namespace dawn_native {
const BufferCopyView* destination,
const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- // TODO(dawn:22): Remove once migration from GPUTextureCopyView.arrayLayer to
- // GPUTextureCopyView.origin.z is done.
- TextureCopyView fixedSrc;
- DAWN_TRY_ASSIGN(fixedSrc, FixTextureCopyView(GetDevice(), source));
- source = &fixedSrc;
-
- // Validate objects before doing the defaulting.
if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(GetDevice()->ValidateObject(source->texture));
- DAWN_TRY(GetDevice()->ValidateObject(destination->buffer));
+ DAWN_TRY(ValidateTextureCopyView(GetDevice(), *source));
+ DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
+ DAWN_TRY(ValidateTextureSampleCountInCopyCommands(source->texture));
+
+ DAWN_TRY(ValidateBufferCopyView(GetDevice(), *destination));
+ DAWN_TRY(ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst));
+
+ // We validate the texture copy range before validating the linear texture data
+ // because the latter divides copyExtent.width by blockWidth and copyExtent.height
+ // by blockHeight, and those divisibility conditions are checked while validating
+ // the texture copy range.
+ DAWN_TRY(ValidateTextureCopyRange(*source, *copySize));
+ DAWN_TRY(ValidateTextureToBufferCopyRestrictions(*source));
+ DAWN_TRY(ValidateLinearTextureData(
+ destination->layout, destination->buffer->GetSize(),
+ source->texture->GetFormat().GetTexelBlockInfo(source->aspect), *copySize));
+
+ mTopLevelTextures.insert(source->texture);
+ mTopLevelBuffers.insert(destination->buffer);
}
// Compute default value for rowsPerImage
- uint32_t defaultedRowsPerImage = destination->rowsPerImage;
+ uint32_t defaultedRowsPerImage = destination->layout.rowsPerImage;
if (defaultedRowsPerImage == 0) {
defaultedRowsPerImage = copySize->height;
}
- // Perform the rest of the validation using the default values.
- if (GetDevice()->IsValidationEnabled()) {
- DAWN_TRY(ValidateTextureSampleCountInCopyCommands(source->texture));
-
- DAWN_TRY(ValidateRowsPerImage(source->texture->GetFormat(), defaultedRowsPerImage,
- copySize->height));
- DAWN_TRY(ValidateImageOrigin(source->texture->GetFormat(), source->origin));
- DAWN_TRY(ValidateImageCopySize(source->texture->GetFormat(), *copySize));
-
- uint32_t bufferCopySize = 0;
- DAWN_TRY(ValidateBytesPerRow(source->texture->GetFormat(), *copySize,
- destination->bytesPerRow));
- DAWN_TRY(ComputeTextureCopyBufferSize(source->texture->GetFormat(), *copySize,
- destination->bytesPerRow,
- defaultedRowsPerImage, &bufferCopySize));
-
- DAWN_TRY(ValidateCopySizeFitsInTexture(*source, *copySize));
- DAWN_TRY(ValidateCopySizeFitsInBuffer(*destination, bufferCopySize));
- DAWN_TRY(ValidateTexelBufferOffset(*destination, source->texture->GetFormat()));
-
- DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
- DAWN_TRY(ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst));
-
- mTopLevelTextures.insert(source->texture);
- mTopLevelBuffers.insert(destination->buffer);
+ // In the case of a one-row copy, bytesPerRow might not contain enough bytes
+ uint32_t bytesPerRow = destination->layout.bytesPerRow;
+ if (copySize->height <= 1 && copySize->depth <= 1) {
+ bytesPerRow = Align(copySize->width * source->texture->GetFormat().blockByteSize,
+ kTextureBytesPerRowAlignment);
}
// Record the copy command.
@@ -803,17 +738,13 @@ namespace dawn_native {
allocator->Allocate<CopyTextureToBufferCmd>(Command::CopyTextureToBuffer);
copy->source.texture = source->texture;
copy->source.origin = source->origin;
- copy->copySize = *copySize;
copy->source.mipLevel = source->mipLevel;
- copy->source.arrayLayer = source->arrayLayer;
+ copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
copy->destination.buffer = destination->buffer;
- copy->destination.offset = destination->offset;
- copy->destination.bytesPerRow = destination->bytesPerRow;
+ copy->destination.offset = destination->layout.offset;
+ copy->destination.bytesPerRow = bytesPerRow;
copy->destination.rowsPerImage = defaultedRowsPerImage;
-
- // TODO(cwallez@chromium.org): Make backends use origin.z instead of arrayLayer
- copy->source.arrayLayer = copy->source.origin.z;
- copy->source.origin.z = 0;
+ copy->copySize = *copySize;
return {};
});
@@ -823,15 +754,6 @@ namespace dawn_native {
const TextureCopyView* destination,
const Extent3D* copySize) {
mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
- // TODO(dawn:22): Remove once migration from GPUTextureCopyView.arrayLayer to
- // GPUTextureCopyView.origin.z is done.
- TextureCopyView fixedSrc;
- DAWN_TRY_ASSIGN(fixedSrc, FixTextureCopyView(GetDevice(), source));
- source = &fixedSrc;
- TextureCopyView fixedDest;
- DAWN_TRY_ASSIGN(fixedDest, FixTextureCopyView(GetDevice(), destination));
- destination = &fixedDest;
-
if (GetDevice()->IsValidationEnabled()) {
DAWN_TRY(GetDevice()->ValidateObject(source->texture));
DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
@@ -839,14 +761,11 @@ namespace dawn_native {
DAWN_TRY(
ValidateTextureToTextureCopyRestrictions(*source, *destination, *copySize));
- DAWN_TRY(ValidateImageOrigin(source->texture->GetFormat(), source->origin));
- DAWN_TRY(ValidateImageCopySize(source->texture->GetFormat(), *copySize));
- DAWN_TRY(
- ValidateImageOrigin(destination->texture->GetFormat(), destination->origin));
- DAWN_TRY(ValidateImageCopySize(destination->texture->GetFormat(), *copySize));
+ DAWN_TRY(ValidateTextureCopyRange(*source, *copySize));
+ DAWN_TRY(ValidateTextureCopyRange(*destination, *copySize));
- DAWN_TRY(ValidateCopySizeFitsInTexture(*source, *copySize));
- DAWN_TRY(ValidateCopySizeFitsInTexture(*destination, *copySize));
+ DAWN_TRY(ValidateTextureCopyView(GetDevice(), *source));
+ DAWN_TRY(ValidateTextureCopyView(GetDevice(), *destination));
DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc));
DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst));
@@ -860,19 +779,14 @@ namespace dawn_native {
copy->source.texture = source->texture;
copy->source.origin = source->origin;
copy->source.mipLevel = source->mipLevel;
- copy->source.arrayLayer = source->arrayLayer;
+ copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
copy->destination.texture = destination->texture;
copy->destination.origin = destination->origin;
copy->destination.mipLevel = destination->mipLevel;
- copy->destination.arrayLayer = destination->arrayLayer;
+ copy->destination.aspect =
+ ConvertAspect(destination->texture->GetFormat(), destination->aspect);
copy->copySize = *copySize;
- // TODO(cwallez@chromium.org): Make backends use origin.z instead of arrayLayer
- copy->source.arrayLayer = copy->source.origin.z;
- copy->source.origin.z = 0;
- copy->destination.arrayLayer = copy->destination.origin.z;
- copy->destination.origin.z = 0;
-
return {};
});
}
@@ -911,6 +825,54 @@ namespace dawn_native {
});
}
+ void CommandEncoder::ResolveQuerySet(QuerySetBase* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ BufferBase* destination,
+ uint64_t destinationOffset) {
+ mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(querySet));
+ DAWN_TRY(GetDevice()->ValidateObject(destination));
+
+ DAWN_TRY(ValidateQuerySetResolve(querySet, firstQuery, queryCount, destination,
+ destinationOffset));
+
+ DAWN_TRY(ValidateCanUseAs(destination, wgpu::BufferUsage::QueryResolve));
+
+ TrackUsedQuerySet(querySet);
+ mTopLevelBuffers.insert(destination);
+ }
+
+ ResolveQuerySetCmd* cmd =
+ allocator->Allocate<ResolveQuerySetCmd>(Command::ResolveQuerySet);
+ cmd->querySet = querySet;
+ cmd->firstQuery = firstQuery;
+ cmd->queryCount = queryCount;
+ cmd->destination = destination;
+ cmd->destinationOffset = destinationOffset;
+
+ return {};
+ });
+ }
+
+ void CommandEncoder::WriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+ mEncodingContext.TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(querySet));
+ DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
+ TrackUsedQuerySet(querySet);
+ }
+
+ WriteTimestampCmd* cmd =
+ allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+ cmd->querySet = querySet;
+ cmd->queryIndex = queryIndex;
+
+ return {};
+ });
+ }
+
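// Illustrative sketch (not part of the patch): recording the two new commands through the
// wgpu C++ API, assuming the device supports timestamp queries. querySet is assumed to be
// created with wgpu::QueryType::Timestamp and resolveBuffer with
// wgpu::BufferUsage::QueryResolve.
void RecordTimestamps(wgpu::Device device,
                      wgpu::QuerySet querySet,
                      wgpu::Buffer resolveBuffer) {
    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    encoder.WriteTimestamp(querySet, 0);  // before the workload
    // ... encode passes ...
    encoder.WriteTimestamp(querySet, 1);  // after the workload
    // Resolve both 8-byte timestamps into the buffer at an 8-byte-aligned offset.
    encoder.ResolveQuerySet(querySet, 0, 2, resolveBuffer, 0);
    wgpu::CommandBuffer commands = encoder.Finish();
    device.GetDefaultQueue().Submit(1, &commands);
}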
CommandBufferBase* CommandEncoder::Finish(const CommandBufferDescriptor* descriptor) {
DeviceBase* device = GetDevice();
// Even if mEncodingContext.Finish() validation fails, calling it will mutate the internal
@@ -994,6 +956,16 @@ namespace dawn_native {
debugGroupStackSize++;
break;
}
+
+ case Command::ResolveQuerySet: {
+ commands->NextCommand<ResolveQuerySetCmd>();
+ break;
+ }
+
+ case Command::WriteTimestamp: {
+ commands->NextCommand<WriteTimestampCmd>();
+ break;
+ }
default:
return DAWN_VALIDATION_ERROR("Command disallowed outside of a pass");
}
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
index 2c89c4bd777..2fe801042ce 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandEncoder.h
@@ -35,6 +35,8 @@ namespace dawn_native {
CommandIterator AcquireCommands();
CommandBufferResourceUsage AcquireResourceUsages();
+ void TrackUsedQuerySet(QuerySetBase* querySet);
+
// Dawn API
ComputePassEncoder* BeginComputePass(const ComputePassDescriptor* descriptor);
RenderPassEncoder* BeginRenderPass(const RenderPassDescriptor* descriptor);
@@ -58,6 +60,13 @@ namespace dawn_native {
void PopDebugGroup();
void PushDebugGroup(const char* groupLabel);
+ void ResolveQuerySet(QuerySetBase* querySet,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ BufferBase* destination,
+ uint64_t destinationOffset);
+ void WriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+
CommandBufferBase* Finish(const CommandBufferDescriptor* descriptor);
private:
@@ -67,6 +76,7 @@ namespace dawn_native {
EncodingContext mEncodingContext;
std::set<BufferBase*> mTopLevelBuffers;
std::set<TextureBase*> mTopLevelTextures;
+ std::set<QuerySetBase*> mUsedQuerySets;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
index 7f8da9bcdf2..497a8dee0de 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.cpp
@@ -19,7 +19,9 @@
#include "dawn_native/Buffer.h"
#include "dawn_native/CommandBufferStateTracker.h"
#include "dawn_native/Commands.h"
+#include "dawn_native/Device.h"
#include "dawn_native/PassResourceUsage.h"
+#include "dawn_native/QuerySet.h"
#include "dawn_native/RenderBundle.h"
#include "dawn_native/RenderPipeline.h"
@@ -202,6 +204,11 @@ namespace dawn_native {
break;
}
+ case Command::WriteTimestamp: {
+ commands->NextCommand<WriteTimestampCmd>();
+ break;
+ }
+
default:
DAWN_TRY(ValidateRenderBundleCommand(
commands, type, &commandBufferState, renderPass->attachmentState.Get(),
@@ -274,6 +281,11 @@ namespace dawn_native {
break;
}
+ case Command::WriteTimestamp: {
+ commands->NextCommand<WriteTimestampCmd>();
+ break;
+ }
+
default:
return DAWN_VALIDATION_ERROR("Command disallowed inside a compute pass");
}
@@ -339,6 +351,18 @@ namespace dawn_native {
return {};
}
+ MaybeError ValidateTimestampQuery(QuerySetBase* querySet, uint32_t queryIndex) {
+ if (querySet->GetQueryType() != wgpu::QueryType::Timestamp) {
+ return DAWN_VALIDATION_ERROR("The query type of query set must be Timestamp");
+ }
+
+ if (queryIndex >= querySet->GetQueryCount()) {
+ return DAWN_VALIDATION_ERROR("Query index exceeds the number of queries in query set");
+ }
+
+ return {};
+ }
+
bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length) {
uint32_t maxStart = std::max(startA, startB);
uint32_t minStart = std::min(startA, startB);
@@ -346,4 +370,226 @@ namespace dawn_native {
static_cast<uint64_t>(maxStart);
}
+ ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
+ const Extent3D& copySize,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage) {
+ // Default value for rowsPerImage
+ if (rowsPerImage == 0) {
+ rowsPerImage = copySize.height;
+ }
+
+ ASSERT(rowsPerImage >= copySize.height);
+ if (copySize.height > 1 || copySize.depth > 1) {
+ ASSERT(bytesPerRow >= copySize.width / blockInfo.blockWidth * blockInfo.blockByteSize);
+ }
+
+ if (copySize.width == 0 || copySize.height == 0 || copySize.depth == 0) {
+ return 0;
+ }
+
+ ASSERT(copySize.height >= 1);
+ ASSERT(copySize.depth >= 1);
+
+ uint32_t texelBlockRowsPerImage = rowsPerImage / blockInfo.blockHeight;
+ // bytesPerImage won't overflow since we're multiplying two uint32_t numbers
+ uint64_t bytesPerImage = uint64_t(texelBlockRowsPerImage) * bytesPerRow;
+ // Provided that copySize.height > 1: bytesInLastSlice won't overflow since it's at most
+ // bytesPerImage. Otherwise the result is a multiplication of two uint32_t numbers.
+ uint64_t bytesInLastSlice =
+ uint64_t(bytesPerRow) * (copySize.height / blockInfo.blockHeight - 1) +
+ (uint64_t(copySize.width) / blockInfo.blockWidth * blockInfo.blockByteSize);
+
+ // This error cannot be thrown for copySize.depth = 1.
+ // For copySize.depth > 1 we know that:
+ // requiredBytesInCopy >= (copySize.depth * bytesPerImage) / 2, so if
+ // copySize.depth * bytesPerImage overflows uint64_t, then requiredBytesInCopy is definitely
+ // too large to fit in the available data size.
+ if (std::numeric_limits<uint64_t>::max() / copySize.depth < bytesPerImage) {
+ return DAWN_VALIDATION_ERROR("requiredBytesInCopy is too large");
+ }
+ return bytesPerImage * (copySize.depth - 1) + bytesInLastSlice;
+ }
+
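// Worked example for ComputeRequiredBytesInCopy: an RGBA8 copy (1x1 blocks, 4 bytes per
// block) of extent 16x4x2 with bytesPerRow = 256 and rowsPerImage = 4 gives
// bytesPerImage = 4 * 256 = 1024 and bytesInLastSlice = 256 * 3 + 16 * 4 = 832, so the
// copy requires 1024 * (2 - 1) + 832 = 1856 bytes. The last image only needs its final
// row's texels rather than a full bytesPerRow stride, which is why bytesInLastSlice is
// smaller than bytesPerImage.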
+ MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
+ uint64_t offset,
+ uint64_t size) {
+ uint64_t bufferSize = buffer->GetSize();
+ bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
+ if (!fitsInBuffer) {
+ return DAWN_VALIDATION_ERROR("Copy would overflow the buffer");
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
+ uint64_t byteSize,
+ const TexelBlockInfo& blockInfo,
+ const Extent3D& copyExtent) {
+ // Validation for the texel block alignments:
+ if (layout.rowsPerImage % blockInfo.blockHeight != 0) {
+ return DAWN_VALIDATION_ERROR(
+ "rowsPerImage must be a multiple of compressed texture format block height");
+ }
+
+ if (layout.offset % blockInfo.blockByteSize != 0) {
+ return DAWN_VALIDATION_ERROR("Offset must be a multiple of the texel or block size");
+ }
+
+ // Validation for other members in layout:
+ if ((copyExtent.height > 1 || copyExtent.depth > 1) &&
+ layout.bytesPerRow <
+ copyExtent.width / blockInfo.blockWidth * blockInfo.blockByteSize) {
+ return DAWN_VALIDATION_ERROR(
+ "bytesPerRow must not be less than the number of bytes per row");
+ }
+
+ // TODO(tommek@google.com): to match the spec there should be another condition here
+ // on rowsPerImage >= copyExtent.height if copyExtent.depth > 1.
+
+ // Validation for the copy being in-bounds:
+ if (layout.rowsPerImage != 0 && layout.rowsPerImage < copyExtent.height) {
+ return DAWN_VALIDATION_ERROR("rowsPerImage must not be less than the copy height.");
+ }
+
+ // We compute the required bytes in the copy after validating the texel block
+ // alignments because the divisibility conditions are necessary for the algorithm
+ // to be valid; the bytesPerRow bound is also necessary to avoid overflows.
+ uint64_t requiredBytesInCopy;
+ DAWN_TRY_ASSIGN(requiredBytesInCopy,
+ ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow,
+ layout.rowsPerImage));
+
+ bool fitsInData =
+ layout.offset <= byteSize && (requiredBytesInCopy <= (byteSize - layout.offset));
+ if (!fitsInData) {
+ return DAWN_VALIDATION_ERROR(
+ "Required size for texture data layout exceeds the given size");
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateBufferCopyView(DeviceBase const* device,
+ const BufferCopyView& bufferCopyView) {
+ DAWN_TRY(device->ValidateObject(bufferCopyView.buffer));
+ if (bufferCopyView.layout.bytesPerRow % kTextureBytesPerRowAlignment != 0) {
+ return DAWN_VALIDATION_ERROR("bytesPerRow must be a multiple of 256");
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateTextureCopyView(DeviceBase const* device,
+ const TextureCopyView& textureCopy) {
+ DAWN_TRY(device->ValidateObject(textureCopy.texture));
+ if (textureCopy.mipLevel >= textureCopy.texture->GetNumMipLevels()) {
+ return DAWN_VALIDATION_ERROR("mipLevel out of range");
+ }
+
+ if (textureCopy.origin.x % textureCopy.texture->GetFormat().blockWidth != 0) {
+ return DAWN_VALIDATION_ERROR(
+ "Offset.x must be a multiple of compressed texture format block width");
+ }
+
+ if (textureCopy.origin.y % textureCopy.texture->GetFormat().blockHeight != 0) {
+ return DAWN_VALIDATION_ERROR(
+ "Offset.y must be a multiple of compressed texture format block height");
+ }
+
+ switch (textureCopy.aspect) {
+ case wgpu::TextureAspect::All:
+ break;
+ case wgpu::TextureAspect::DepthOnly:
+ if ((textureCopy.texture->GetFormat().aspects & Aspect::Depth) == 0) {
+ return DAWN_VALIDATION_ERROR(
+ "Texture does not have depth aspect for texture copy");
+ }
+ break;
+ case wgpu::TextureAspect::StencilOnly:
+ if ((textureCopy.texture->GetFormat().aspects & Aspect::Stencil) == 0) {
+ return DAWN_VALIDATION_ERROR(
+ "Texture does not have stencil aspect for texture copy");
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ return {};
+ }
+
+ MaybeError ValidateTextureCopyRange(const TextureCopyView& textureCopy,
+ const Extent3D& copySize) {
+ // TODO(jiawei.shao@intel.com): add validations on the texture-to-texture copies within the
+ // same texture.
+ const TextureBase* texture = textureCopy.texture;
+
+ // Validation for the copy being in-bounds:
+ Extent3D mipSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
+ // For 2D textures, include the array layer as depth so it can be checked with other
+ // dimensions.
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+ mipSize.depth = texture->GetArrayLayers();
+
+ // All texture dimensions are in uint32_t so by doing checks in uint64_t we avoid
+ // overflows.
+ if (static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
+ static_cast<uint64_t>(mipSize.width) ||
+ static_cast<uint64_t>(textureCopy.origin.y) + static_cast<uint64_t>(copySize.height) >
+ static_cast<uint64_t>(mipSize.height) ||
+ static_cast<uint64_t>(textureCopy.origin.z) + static_cast<uint64_t>(copySize.depth) >
+ static_cast<uint64_t>(mipSize.depth)) {
+ return DAWN_VALIDATION_ERROR("Touching outside of the texture");
+ }
+
+ // Validation for the texel block alignments:
+ if (copySize.width % textureCopy.texture->GetFormat().blockWidth != 0) {
+ return DAWN_VALIDATION_ERROR(
+ "copySize.width must be a multiple of compressed texture format block width");
+ }
+
+ if (copySize.height % textureCopy.texture->GetFormat().blockHeight != 0) {
+ return DAWN_VALIDATION_ERROR(
+ "copySize.height must be a multiple of compressed texture format block height");
+ }
+
+ return {};
+ }
+
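// Example of the bounds check above: a 32x32 copy at origin (224, 224) into a 256x256
// mip level passes (224 + 32 == 256), while the same copy at origin (240, 240) fails.
// For a BC-compressed format with 4x4 blocks, copySize.width and copySize.height must
// additionally be multiples of 4.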
+ MaybeError ValidateBufferToTextureCopyRestrictions(const TextureCopyView& dst) {
+ const Format& format = dst.texture->GetFormat();
+
+ bool depthSelected = false;
+ switch (dst.aspect) {
+ case wgpu::TextureAspect::All:
+ switch (format.aspects) {
+ case Aspect::Color:
+ case Aspect::Stencil:
+ break;
+ case Aspect::Depth:
+ depthSelected = true;
+ break;
+ default:
+ return DAWN_VALIDATION_ERROR(
+ "A single aspect must be selected for multi planar formats in buffer "
+ "to texture copies");
+ }
+ break;
+ case wgpu::TextureAspect::DepthOnly:
+ ASSERT(format.aspects & Aspect::Depth);
+ depthSelected = true;
+ break;
+ case wgpu::TextureAspect::StencilOnly:
+ ASSERT(format.aspects & Aspect::Stencil);
+ break;
+ }
+ if (depthSelected) {
+ return DAWN_VALIDATION_ERROR("Cannot copy into the depth aspect of a texture");
+ }
+ return {};
+ }
+
} // namespace dawn_native
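
A note on the bounds check in ValidateTextureCopyRange above: each uint32_t term is promoted to uint64_t before the addition, so origin + copySize cannot wrap around and sneak past the comparison. A minimal standalone sketch of the same pattern (the function name is hypothetical):

    #include <cstdint>

    // Returns true when [origin, origin + size) fits inside extent. Promoting to
    // uint64_t first means origin + size cannot wrap a 32-bit integer, which
    // would otherwise make an out-of-bounds copy look in-bounds.
    bool FitsInExtent(uint32_t origin, uint32_t size, uint32_t extent) {
        return static_cast<uint64_t>(origin) + static_cast<uint64_t>(size) <=
               static_cast<uint64_t>(extent);
    }

    // Example: origin = 0xFFFFFFF0, size = 0x20 wraps to 0x10 in uint32_t and
    // would wrongly pass a 32-bit check; the 64-bit sum 0x100000010 is rejected.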
diff --git a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
index 53871ccdc5e..0aaa398ade0 100644
--- a/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
+++ b/chromium/third_party/dawn/src/dawn_native/CommandValidation.h
@@ -17,14 +17,17 @@
#include "dawn_native/CommandAllocator.h"
#include "dawn_native/Error.h"
+#include "dawn_native/Texture.h"
#include <vector>
namespace dawn_native {
class AttachmentState;
+ class QuerySetBase;
struct BeginRenderPassCmd;
struct PassResourceUsage;
+ struct TexelBlockInfo;
MaybeError ValidateCanPopDebugGroup(uint64_t debugGroupStackSize);
MaybeError ValidateFinalDebugGroupStackSize(uint64_t debugGroupStackSize);
@@ -36,6 +39,36 @@ namespace dawn_native {
MaybeError ValidatePassResourceUsage(const PassResourceUsage& usage);
+ MaybeError ValidateTimestampQuery(QuerySetBase* querySet, uint32_t queryIndex);
+
+ ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
+ const Extent3D& copySize,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage);
+
+ MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
+ uint64_t byteSize,
+ const TexelBlockInfo& blockInfo,
+ const Extent3D& copyExtent);
+ MaybeError ValidateTextureCopyRange(const TextureCopyView& textureCopyView,
+ const Extent3D& copySize);
+ MaybeError ValidateBufferToTextureCopyRestrictions(const TextureCopyView& dst);
+
+ MaybeError ValidateBufferCopyView(DeviceBase const* device,
+ const BufferCopyView& bufferCopyView);
+ MaybeError ValidateTextureCopyView(DeviceBase const* device,
+ const TextureCopyView& textureCopyView);
+
+ MaybeError ValidateRowsPerImage(const Format& format,
+ uint32_t rowsPerImage,
+ uint32_t copyHeight);
+ MaybeError ValidateBytesPerRow(const Format& format,
+ const Extent3D& copySize,
+ uint32_t bytesPerRow);
+ MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
+ uint64_t offset,
+ uint64_t size);
+
bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length);
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/Commands.cpp b/chromium/third_party/dawn/src/dawn_native/Commands.cpp
index b4098108a32..621a7434abe 100644
--- a/chromium/third_party/dawn/src/dawn_native/Commands.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Commands.cpp
@@ -18,6 +18,7 @@
#include "dawn_native/Buffer.h"
#include "dawn_native/CommandAllocator.h"
#include "dawn_native/ComputePipeline.h"
+#include "dawn_native/QuerySet.h"
#include "dawn_native/RenderBundle.h"
#include "dawn_native/RenderPipeline.h"
#include "dawn_native/Texture.h"
@@ -127,6 +128,11 @@ namespace dawn_native {
cmd->~PushDebugGroupCmd();
break;
}
+ case Command::ResolveQuerySet: {
+ ResolveQuerySetCmd* cmd = commands->NextCommand<ResolveQuerySetCmd>();
+ cmd->~ResolveQuerySetCmd();
+ break;
+ }
case Command::SetComputePipeline: {
SetComputePipelineCmd* cmd = commands->NextCommand<SetComputePipelineCmd>();
cmd->~SetComputePipelineCmd();
@@ -175,9 +181,15 @@ namespace dawn_native {
cmd->~SetVertexBufferCmd();
break;
}
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = commands->NextCommand<WriteTimestampCmd>();
+ cmd->~WriteTimestampCmd();
+ break;
+ }
}
}
- commands->DataWasDestroyed();
+
+ commands->MakeEmptyAsDataWasDestroyed();
}
void SkipCommand(CommandIterator* commands, Command type) {
@@ -260,6 +272,11 @@ namespace dawn_native {
break;
}
+ case Command::ResolveQuerySet: {
+ commands->NextCommand<ResolveQuerySetCmd>();
+ break;
+ }
+
case Command::SetComputePipeline:
commands->NextCommand<SetComputePipelineCmd>();
break;
@@ -300,6 +317,11 @@ namespace dawn_native {
commands->NextCommand<SetVertexBufferCmd>();
break;
}
+
+ case Command::WriteTimestamp: {
+ commands->NextCommand<WriteTimestampCmd>();
+ break;
+ }
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Commands.h b/chromium/third_party/dawn/src/dawn_native/Commands.h
index 155a21d83be..094867839fd 100644
--- a/chromium/third_party/dawn/src/dawn_native/Commands.h
+++ b/chromium/third_party/dawn/src/dawn_native/Commands.h
@@ -51,6 +51,7 @@ namespace dawn_native {
InsertDebugMarker,
PopDebugGroup,
PushDebugGroup,
+ ResolveQuerySet,
SetComputePipeline,
SetRenderPipeline,
SetStencilReference,
@@ -60,6 +61,7 @@ namespace dawn_native {
SetBindGroup,
SetIndexBuffer,
SetVertexBuffer,
+ WriteTimestamp,
};
struct BeginComputePassCmd {};
@@ -102,8 +104,8 @@ namespace dawn_native {
struct TextureCopy {
Ref<TextureBase> texture;
uint32_t mipLevel;
- uint32_t arrayLayer;
- Origin3D origin; // Texels
+ Origin3D origin; // Texels / array layer
+ Aspect aspect;
};
struct CopyBufferToBufferCmd {
@@ -186,6 +188,14 @@ namespace dawn_native {
uint32_t length;
};
+ struct ResolveQuerySetCmd {
+ Ref<QuerySetBase> querySet;
+ uint32_t firstQuery;
+ uint32_t queryCount;
+ Ref<BufferBase> destination;
+ uint64_t destinationOffset;
+ };
+
struct SetComputePipelineCmd {
Ref<ComputePipelineBase> pipeline;
};
@@ -229,6 +239,11 @@ namespace dawn_native {
uint64_t size;
};
+ struct WriteTimestampCmd {
+ Ref<QuerySetBase> querySet;
+ uint32_t queryIndex;
+ };
+
    // This needs to be called before the CommandIterator is freed so that the Ref<>s present
    // in the commands have a chance to run their destructors and remove internal references.
class CommandIterator;
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
index 5e36601fcc5..2329c0008e2 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.cpp
@@ -16,9 +16,11 @@
#include "dawn_native/Buffer.h"
#include "dawn_native/CommandEncoder.h"
+#include "dawn_native/CommandValidation.h"
#include "dawn_native/Commands.h"
#include "dawn_native/ComputePipeline.h"
#include "dawn_native/Device.h"
+#include "dawn_native/QuerySet.h"
namespace dawn_native {
@@ -96,4 +98,21 @@ namespace dawn_native {
});
}
+ void ComputePassEncoder::WriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+ mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(querySet));
+ DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
+ mCommandEncoder->TrackUsedQuerySet(querySet);
+ }
+
+ WriteTimestampCmd* cmd =
+ allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+ cmd->querySet = querySet;
+ cmd->queryIndex = queryIndex;
+
+ return {};
+ });
+ }
+
} // namespace dawn_native
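
For context, the new WriteTimestamp entry point is driven from the public API roughly as follows. This is a hedged sketch assuming the wgpu:: C++ wrappers mirror ComputePassEncoder::WriteTimestamp above; device setup and query resolution are elided:

    wgpu::QuerySetDescriptor desc = {};
    desc.type = wgpu::QueryType::Timestamp;
    desc.count = 2;
    wgpu::QuerySet querySet = device.CreateQuerySet(&desc);

    pass.WriteTimestamp(querySet, 0);  // before the dispatches of interest
    // ... Dispatch calls ...
    pass.WriteTimestamp(querySet, 1);  // after; read back via ResolveQuerySet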
diff --git a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
index f790aad51ee..6ae796a2411 100644
--- a/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/ComputePassEncoder.h
@@ -36,6 +36,8 @@ namespace dawn_native {
void DispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
void SetPipeline(ComputePipelineBase* pipeline);
+ void WriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+
protected:
ComputePassEncoder(DeviceBase* device,
CommandEncoder* commandEncoder,
diff --git a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
index 32061152bdf..b7bb94b32d1 100644
--- a/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/DawnNative.cpp
@@ -149,6 +149,10 @@ namespace dawn_native {
mImpl->EnableBackendValidation(enableBackendValidation);
}
+ void Instance::EnableGPUBasedBackendValidation(bool enableGPUBasedBackendValidation) {
+ mImpl->EnableGPUBasedBackendValidation(enableGPUBasedBackendValidation);
+ }
+
void Instance::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
mImpl->EnableBeginCaptureOnStartup(beginCaptureOnStartup);
}
@@ -175,10 +179,13 @@ namespace dawn_native {
uint32_t baseMipLevel,
uint32_t levelCount,
uint32_t baseArrayLayer,
- uint32_t layerCount) {
+ uint32_t layerCount,
+ WGPUTextureAspect aspect) {
dawn_native::TextureBase* textureBase =
reinterpret_cast<dawn_native::TextureBase*>(texture);
- SubresourceRange range = {baseMipLevel, levelCount, baseArrayLayer, layerCount};
+ SubresourceRange range = {
+ baseMipLevel, levelCount, baseArrayLayer, layerCount,
+ ConvertAspect(textureBase->GetFormat(), static_cast<wgpu::TextureAspect>(aspect))};
return textureBase->IsSubresourceContentInitialized(range);
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.cpp b/chromium/third_party/dawn/src/dawn_native/Device.cpp
index 1c30cd509c8..fcb2876ac9b 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Device.cpp
@@ -112,6 +112,8 @@ namespace dawn_native {
// alive.
mState = State::Alive;
+ DAWN_TRY_ASSIGN(mEmptyBindGroupLayout, CreateEmptyBindGroupLayout());
+
return {};
}
@@ -169,6 +171,8 @@ namespace dawn_native {
mDynamicUploader = nullptr;
mMapRequestTracker = nullptr;
+ mEmptyBindGroupLayout = nullptr;
+
AssumeCommandsComplete();
// Tell the backend that it can free all the objects now that the GPU timeline is empty.
ShutDownImpl();
@@ -388,21 +392,22 @@ namespace dawn_native {
return mFormatTable[index];
}
- ResultOrError<BindGroupLayoutBase*> DeviceBase::GetOrCreateBindGroupLayout(
+ ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::GetOrCreateBindGroupLayout(
const BindGroupLayoutDescriptor* descriptor) {
BindGroupLayoutBase blueprint(this, descriptor);
+ Ref<BindGroupLayoutBase> result = nullptr;
auto iter = mCaches->bindGroupLayouts.find(&blueprint);
if (iter != mCaches->bindGroupLayouts.end()) {
- (*iter)->Reference();
- return *iter;
+ result = *iter;
+ } else {
+ BindGroupLayoutBase* backendObj;
+ DAWN_TRY_ASSIGN(backendObj, CreateBindGroupLayoutImpl(descriptor));
+ backendObj->SetIsCachedReference();
+ mCaches->bindGroupLayouts.insert(backendObj);
+ result = AcquireRef(backendObj);
}
-
- BindGroupLayoutBase* backendObj;
- DAWN_TRY_ASSIGN(backendObj, CreateBindGroupLayoutImpl(descriptor));
- backendObj->SetIsCachedReference();
- mCaches->bindGroupLayouts.insert(backendObj);
- return backendObj;
+ return std::move(result);
}
void DeviceBase::UncacheBindGroupLayout(BindGroupLayoutBase* obj) {
@@ -411,6 +416,20 @@ namespace dawn_native {
ASSERT(removedCount == 1);
}
+ // Private function used at initialization
+ ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateEmptyBindGroupLayout() {
+ BindGroupLayoutDescriptor desc = {};
+ desc.entryCount = 0;
+ desc.entries = nullptr;
+
+ return GetOrCreateBindGroupLayout(&desc);
+ }
+
+ BindGroupLayoutBase* DeviceBase::GetEmptyBindGroupLayout() {
+ ASSERT(mEmptyBindGroupLayout);
+ return mEmptyBindGroupLayout.Get();
+ }
+
ResultOrError<ComputePipelineBase*> DeviceBase::GetOrCreateComputePipeline(
const ComputePipelineDescriptor* descriptor) {
ComputePipelineBase blueprint(this, descriptor);
@@ -585,43 +604,32 @@ namespace dawn_native {
return result;
}
BufferBase* DeviceBase::CreateBuffer(const BufferDescriptor* descriptor) {
- BufferBase* result = nullptr;
+ Ref<BufferBase> result = nullptr;
if (ConsumedError(CreateBufferInternal(descriptor), &result)) {
- ASSERT(result == nullptr);
- return BufferBase::MakeError(this);
+ ASSERT(result.Get() == nullptr);
+ return BufferBase::MakeError(this, descriptor);
}
- return result;
+ return result.Detach();
}
WGPUCreateBufferMappedResult DeviceBase::CreateBufferMapped(
const BufferDescriptor* descriptor) {
- BufferBase* buffer = nullptr;
- uint8_t* data = nullptr;
-
- uint64_t size = descriptor->size;
- if (ConsumedError(CreateBufferInternal(descriptor), &buffer) ||
- ConsumedError(buffer->MapAtCreation(&data))) {
- // Map failed. Replace the buffer with an error buffer.
- if (buffer != nullptr) {
- buffer->Release();
- }
- buffer = BufferBase::MakeErrorMapped(this, size, &data);
- }
+ EmitDeprecationWarning(
+ "CreateBufferMapped is deprecated, use wgpu::BufferDescriptor::mappedAtCreation and "
+ "wgpu::Buffer::GetMappedRange instead");
- ASSERT(buffer != nullptr);
- if (data == nullptr) {
- // |data| may be nullptr if there was an OOM in MakeErrorMapped.
- // Non-zero dataLength and nullptr data is used to indicate there should be
- // mapped data but the allocation failed.
- ASSERT(buffer->IsError());
- } else {
- memset(data, 0, size);
- }
+ BufferDescriptor fixedDesc = *descriptor;
+ fixedDesc.mappedAtCreation = true;
+ BufferBase* buffer = CreateBuffer(&fixedDesc);
WGPUCreateBufferMappedResult result = {};
result.buffer = reinterpret_cast<WGPUBuffer>(buffer);
- result.data = data;
- result.dataLength = size;
+ result.data = buffer->GetMappedRange(0, descriptor->size);
+ result.dataLength = descriptor->size;
+
+ if (result.data != nullptr) {
+ memset(result.data, 0, result.dataLength);
+ }
return result;
}
@@ -734,7 +742,8 @@ namespace dawn_native {
// For Dawn Wire
BufferBase* DeviceBase::CreateErrorBuffer() {
- return BufferBase::MakeError(this);
+ BufferDescriptor desc = {};
+ return BufferBase::MakeError(this, &desc);
}
// Other Device API methods
@@ -813,6 +822,10 @@ namespace dawn_native {
return !IsToggleEnabled(Toggle::SkipValidation);
}
+ bool DeviceBase::IsRobustnessEnabled() const {
+ return !IsToggleEnabled(Toggle::DisableRobustness);
+ }
+
size_t DeviceBase::GetLazyClearCountForTesting() {
return mLazyClearCountForTesting;
}
@@ -851,17 +864,27 @@ namespace dawn_native {
if (IsValidationEnabled()) {
DAWN_TRY(ValidateBindGroupLayoutDescriptor(this, descriptor));
}
- DAWN_TRY_ASSIGN(*result, GetOrCreateBindGroupLayout(descriptor));
+ Ref<BindGroupLayoutBase> bgl;
+ DAWN_TRY_ASSIGN(bgl, GetOrCreateBindGroupLayout(descriptor));
+ *result = bgl.Detach();
return {};
}
- ResultOrError<BufferBase*> DeviceBase::CreateBufferInternal(
+ ResultOrError<Ref<BufferBase>> DeviceBase::CreateBufferInternal(
const BufferDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
if (IsValidationEnabled()) {
DAWN_TRY(ValidateBufferDescriptor(this, descriptor));
}
- return CreateBufferImpl(descriptor);
+
+ Ref<BufferBase> buffer;
+ DAWN_TRY_ASSIGN(buffer, CreateBufferImpl(descriptor));
+
+ if (descriptor->mappedAtCreation) {
+ DAWN_TRY(buffer->MapAtCreation());
+ }
+
+ return std::move(buffer);
}
MaybeError DeviceBase::CreateComputePipelineInternal(
@@ -1007,13 +1030,6 @@ namespace dawn_native {
ResultOrError<Ref<TextureBase>> DeviceBase::CreateTextureInternal(
const TextureDescriptor* descriptor) {
DAWN_TRY(ValidateIsAlive());
-
- // TODO(dawn:22): Remove once migration from GPUTextureDescriptor.arrayLayerCount to
- // GPUTextureDescriptor.size.depth is done.
- TextureDescriptor fixedDescriptor;
- DAWN_TRY_ASSIGN(fixedDescriptor, FixTextureDescriptor(this, descriptor));
- descriptor = &fixedDescriptor;
-
if (IsValidationEnabled()) {
DAWN_TRY(ValidateTextureDescriptor(this, descriptor));
}
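
The deprecation path above points client code at the replacement pattern; a hedged sketch of what migrating off CreateBufferMapped looks like (usage flags and sizes are illustrative):

    #include <cstring>

    wgpu::Buffer MakeInitializedBuffer(wgpu::Device device, const void* data, size_t size) {
        wgpu::BufferDescriptor desc = {};
        desc.size = size;
        desc.usage = wgpu::BufferUsage::Vertex;
        desc.mappedAtCreation = true;

        wgpu::Buffer buffer = device.CreateBuffer(&desc);
        std::memcpy(buffer.GetMappedRange(0, size), data, size);  // write initial contents
        buffer.Unmap();  // hand the data over to the GPU timeline
        return buffer;
    }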
diff --git a/chromium/third_party/dawn/src/dawn_native/Device.h b/chromium/third_party/dawn/src/dawn_native/Device.h
index 94d916f7c2b..16f076c3343 100644
--- a/chromium/third_party/dawn/src/dawn_native/Device.h
+++ b/chromium/third_party/dawn/src/dawn_native/Device.h
@@ -108,10 +108,12 @@ namespace dawn_native {
// the created object will be, the "blueprint". The blueprint is just a FooBase object
// instead of a backend Foo object. If the blueprint doesn't match an object in the
// cache, then the descriptor is used to make a new object.
- ResultOrError<BindGroupLayoutBase*> GetOrCreateBindGroupLayout(
+ ResultOrError<Ref<BindGroupLayoutBase>> GetOrCreateBindGroupLayout(
const BindGroupLayoutDescriptor* descriptor);
void UncacheBindGroupLayout(BindGroupLayoutBase* obj);
+ BindGroupLayoutBase* GetEmptyBindGroupLayout();
+
ResultOrError<ComputePipelineBase*> GetOrCreateComputePipeline(
const ComputePipelineDescriptor* descriptor);
void UncacheComputePipeline(ComputePipelineBase* obj);
@@ -214,6 +216,7 @@ namespace dawn_native {
bool IsExtensionEnabled(Extension extension) const;
bool IsToggleEnabled(Toggle toggle) const;
bool IsValidationEnabled() const;
+ bool IsRobustnessEnabled() const;
size_t GetLazyClearCountForTesting();
void IncrementLazyClearCountForTesting();
size_t GetDeprecationWarningCountForTesting();
@@ -238,7 +241,8 @@ namespace dawn_native {
const BindGroupDescriptor* descriptor) = 0;
virtual ResultOrError<BindGroupLayoutBase*> CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) = 0;
- virtual ResultOrError<BufferBase*> CreateBufferImpl(const BufferDescriptor* descriptor) = 0;
+ virtual ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+ const BufferDescriptor* descriptor) = 0;
virtual ResultOrError<ComputePipelineBase*> CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) = 0;
virtual ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
@@ -264,11 +268,13 @@ namespace dawn_native {
TextureBase* texture,
const TextureViewDescriptor* descriptor) = 0;
+ ResultOrError<Ref<BindGroupLayoutBase>> CreateEmptyBindGroupLayout();
+
MaybeError CreateBindGroupInternal(BindGroupBase** result,
const BindGroupDescriptor* descriptor);
MaybeError CreateBindGroupLayoutInternal(BindGroupLayoutBase** result,
const BindGroupLayoutDescriptor* descriptor);
- ResultOrError<BufferBase*> CreateBufferInternal(const BufferDescriptor* descriptor);
+ ResultOrError<Ref<BufferBase>> CreateBufferInternal(const BufferDescriptor* descriptor);
MaybeError CreateComputePipelineInternal(ComputePipelineBase** result,
const ComputePipelineDescriptor* descriptor);
MaybeError CreatePipelineLayoutInternal(PipelineLayoutBase** result,
@@ -340,6 +346,8 @@ namespace dawn_native {
struct Caches;
std::unique_ptr<Caches> mCaches;
+ Ref<BindGroupLayoutBase> mEmptyBindGroupLayout;
+
std::unique_ptr<DynamicUploader> mDynamicUploader;
std::unique_ptr<ErrorScopeTracker> mErrorScopeTracker;
std::unique_ptr<FenceSignalTracker> mFenceSignalTracker;
diff --git a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp b/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp
index be92ea09591..6fb8eef6851 100644
--- a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/DynamicUploader.cpp
@@ -28,7 +28,8 @@ namespace dawn_native {
mDevice->GetPendingCommandSerial());
}
- ResultOrError<UploadHandle> DynamicUploader::Allocate(uint64_t allocationSize, Serial serial) {
+ ResultOrError<UploadHandle> DynamicUploader::AllocateInternal(uint64_t allocationSize,
+ Serial serial) {
// Disable further sub-allocation should the request be too large.
if (allocationSize > kRingBufferSize) {
std::unique_ptr<StagingBufferBase> stagingBuffer;
@@ -108,4 +109,21 @@ namespace dawn_native {
}
mReleasedStagingBuffers.ClearUpTo(lastCompletedSerial);
}
-} // namespace dawn_native \ No newline at end of file
+
+ // TODO(dawn:512): Optimize this function so that it doesn't allocate additional memory
+ // when it's not necessary.
+ ResultOrError<UploadHandle> DynamicUploader::Allocate(uint64_t allocationSize,
+ Serial serial,
+ uint64_t offsetAlignment) {
+ ASSERT(offsetAlignment > 0);
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle,
+ AllocateInternal(allocationSize + offsetAlignment - 1, serial));
+ uint64_t additionalOffset =
+ Align(uploadHandle.startOffset, offsetAlignment) - uploadHandle.startOffset;
+ uploadHandle.mappedBuffer =
+ static_cast<uint8_t*>(uploadHandle.mappedBuffer) + additionalOffset;
+ uploadHandle.startOffset += additionalOffset;
+ return uploadHandle;
+ }
+} // namespace dawn_native
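
The aligned Allocate above uses the classic over-allocate-then-round-up trick: requesting allocationSize + offsetAlignment - 1 bytes guarantees an aligned offset exists inside the allocation. A standalone sketch of the rounding step (Align here is a stand-in for the helper in common/Math.h and assumes a power-of-two alignment):

    #include <cstdint>

    uint64_t Align(uint64_t value, uint64_t alignment) {
        // Valid only for power-of-two alignments: adding alignment - 1 carries
        // into the next multiple, and the mask clears the low bits.
        return (value + alignment - 1) & ~(alignment - 1);
    }

    // Example: a start offset of 13 with alignment 16 rounds up to 16, consuming
    // 3 of the (alignment - 1) padding bytes requested up front.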
diff --git a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h b/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h
index 8210b035b22..a652bd8853f 100644
--- a/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h
+++ b/chromium/third_party/dawn/src/dawn_native/DynamicUploader.h
@@ -40,7 +40,9 @@ namespace dawn_native {
// implemented.
void ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer);
- ResultOrError<UploadHandle> Allocate(uint64_t allocationSize, Serial serial);
+ ResultOrError<UploadHandle> Allocate(uint64_t allocationSize,
+ Serial serial,
+ uint64_t offsetAlignment);
void Deallocate(Serial lastCompletedSerial);
private:
@@ -51,6 +53,8 @@ namespace dawn_native {
RingBufferAllocator mAllocator;
};
+ ResultOrError<UploadHandle> AllocateInternal(uint64_t allocationSize, Serial serial);
+
std::vector<std::unique_ptr<RingBuffer>> mRingBuffers;
SerialQueue<std::unique_ptr<StagingBufferBase>> mReleasedStagingBuffers;
DeviceBase* mDevice;
diff --git a/chromium/third_party/dawn/src/dawn_native/EnumClassBitmasks.h b/chromium/third_party/dawn/src/dawn_native/EnumClassBitmasks.h
new file mode 100644
index 00000000000..3227cd2db0f
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/EnumClassBitmasks.h
@@ -0,0 +1,48 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ENUMCLASSBITMASK_H_
+#define DAWNNATIVE_ENUMCLASSBITMASK_H_
+
+#include "dawn/EnumClassBitmasks.h"
+
+namespace dawn_native {
+
+    // EnumClassBitmasks is a WebGPU helper in the wgpu:: namespace.
+ // Re-export it in the dawn_native namespace.
+
+    // Specialize this for each enum used with EnumMaskIterator.
+ template <typename T>
+ struct EnumBitmaskSize {
+ static constexpr unsigned value = 0;
+ };
+
+ using wgpu::operator|;
+ using wgpu::operator&;
+ using wgpu::operator^;
+ using wgpu::operator~;
+ using wgpu::operator&=;
+ using wgpu::operator|=;
+ using wgpu::operator^=;
+
+ using wgpu::HasZeroOrOneBits;
+
+ template <typename T>
+ constexpr bool HasOneBit(T value) {
+ return HasZeroOrOneBits(value) && value != T(0);
+ }
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_ENUMCLASSBITMASK_H_
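
A standalone illustration (hedged; the real definitions live in dawn/EnumClassBitmasks.h) of the semantics HasOneBit builds on: for an unsigned value v, v & (v - 1) clears the lowest set bit, so the result is zero exactly when v has zero or one bits set.

    #include <cstdint>

    constexpr bool HasZeroOrOneBitsU32(uint32_t v) {
        return (v & (v - 1)) == 0;
    }

    constexpr bool HasOneBitU32(uint32_t v) {
        return HasZeroOrOneBitsU32(v) && v != 0;
    }

    static_assert(HasOneBitU32(0x08), "single bit set");
    static_assert(!HasOneBitU32(0x0A), "two bits set");
    static_assert(!HasOneBitU32(0x00), "no bits set");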
diff --git a/chromium/third_party/dawn/src/dawn_native/EnumMaskIterator.h b/chromium/third_party/dawn/src/dawn_native/EnumMaskIterator.h
new file mode 100644
index 00000000000..85fde1032dc
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/EnumMaskIterator.h
@@ -0,0 +1,80 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_ENUMMASKITERATOR_H_
+#define DAWNNATIVE_ENUMMASKITERATOR_H_
+
+#include "common/BitSetIterator.h"
+#include "dawn_native/EnumClassBitmasks.h"
+
+namespace dawn_native {
+
+ template <typename T>
+ class EnumMaskIterator final {
+ static constexpr size_t N = EnumBitmaskSize<T>::value;
+ static_assert(N > 0, "");
+
+ using U = std::underlying_type_t<T>;
+
+ public:
+ EnumMaskIterator(const T& mask) : mBitSetIterator(std::bitset<N>(static_cast<U>(mask))) {
+ }
+
+ class Iterator final {
+ public:
+ Iterator(const typename BitSetIterator<N, U>::Iterator& iter) : mIter(iter) {
+ }
+
+ Iterator& operator++() {
+ ++mIter;
+ return *this;
+ }
+
+ bool operator==(const Iterator& other) const {
+ return mIter == other.mIter;
+ }
+
+ bool operator!=(const Iterator& other) const {
+ return mIter != other.mIter;
+ }
+
+ T operator*() const {
+ U value = *mIter;
+ return static_cast<T>(U(1) << value);
+ }
+
+ private:
+ typename BitSetIterator<N, U>::Iterator mIter;
+ };
+
+ Iterator begin() const {
+ return Iterator(mBitSetIterator.begin());
+ }
+
+ Iterator end() const {
+ return Iterator(mBitSetIterator.end());
+ }
+
+ private:
+ BitSetIterator<N, U> mBitSetIterator;
+ };
+
+ template <typename T>
+ EnumMaskIterator<T> IterateEnumMask(const T& mask) {
+ return EnumMaskIterator<T>(mask);
+ }
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_ENUMMASKITERATOR_H_
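
What EnumMaskIterator does, as a standalone hedged sketch without the BitSetIterator machinery: visit each set bit of a mask as a single-bit enum value, lowest bit first. The demo enum is hypothetical; in this patch the client is the Aspect bitmask.

    #include <cstdint>

    enum class DemoAspect : uint8_t { Color = 1 << 0, Depth = 1 << 1, Stencil = 1 << 2 };

    template <typename F>
    void ForEachSetBit(uint8_t mask, F callback) {
        while (mask != 0) {
            uint8_t lowest = mask & uint8_t(-mask);  // isolate the lowest set bit
            callback(static_cast<DemoAspect>(lowest));
            mask = uint8_t(mask & (mask - 1));  // clear that bit and continue
        }
    }

    // ForEachSetBit(0b110, f) calls f(DemoAspect::Depth), then f(DemoAspect::Stencil).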
diff --git a/chromium/third_party/dawn/src/dawn_native/Format.cpp b/chromium/third_party/dawn/src/dawn_native/Format.cpp
index 9c7fa58a3eb..11b71d269b8 100644
--- a/chromium/third_party/dawn/src/dawn_native/Format.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Format.cpp
@@ -13,8 +13,10 @@
// limitations under the License.
#include "dawn_native/Format.h"
+
#include "dawn_native/Device.h"
#include "dawn_native/Extensions.h"
+#include "dawn_native/Texture.h"
#include <bitset>
@@ -58,25 +60,96 @@ namespace dawn_native {
}
bool Format::IsColor() const {
- return aspect == Aspect::Color;
+ return aspects == Aspect::Color;
}
bool Format::HasDepth() const {
- return aspect == Depth || aspect == DepthStencil;
+ return (aspects & Aspect::Depth) != 0;
}
bool Format::HasStencil() const {
- return aspect == Stencil || aspect == DepthStencil;
+ return (aspects & Aspect::Stencil) != 0;
}
bool Format::HasDepthOrStencil() const {
- return aspect != Color;
+ return (aspects & (Aspect::Depth | Aspect::Stencil)) != 0;
}
bool Format::HasComponentType(Type componentType) const {
return componentType == type;
}
+ TexelBlockInfo Format::GetTexelBlockInfo(wgpu::TextureAspect aspect) const {
+ switch (aspect) {
+ case wgpu::TextureAspect::All:
+ switch (aspects) {
+ case Aspect::Color:
+ case Aspect::Depth:
+ case Aspect::Stencil:
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return *this;
+
+ case wgpu::TextureAspect::DepthOnly:
+ ASSERT(HasDepth());
+ switch (format) {
+ case wgpu::TextureFormat::Depth32Float:
+ return *this;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+
+ case wgpu::TextureAspect::StencilOnly:
+ ASSERT(HasStencil());
+ switch (format) {
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ return {1, 1, 1};
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ TexelBlockInfo Format::GetTexelBlockInfo(Aspect aspect) const {
+ ASSERT(HasOneBit(aspect));
+ ASSERT(aspects & aspect);
+ switch (aspect) {
+ case Aspect::Color:
+ ASSERT(aspects == aspect);
+ return *this;
+ case Aspect::Depth:
+ switch (format) {
+ case wgpu::TextureFormat::Depth32Float:
+ return *this;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ case Aspect::Stencil:
+ switch (format) {
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ return {1, 1, 1};
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
size_t Format::GetIndex() const {
return ComputeFormatIndex(format);
}
@@ -98,7 +171,6 @@ namespace dawn_native {
std::bitset<kKnownFormatCount> formatsSet;
using Type = Format::Type;
- using Aspect = Format::Aspect;
auto AddFormat = [&table, &formatsSet](Format format) {
size_t index = ComputeFormatIndex(format.format);
@@ -121,7 +193,7 @@ namespace dawn_native {
internalFormat.isCompressed = false;
internalFormat.isSupported = true;
internalFormat.supportsStorageUsage = supportsStorageUsage;
- internalFormat.aspect = Aspect::Color;
+ internalFormat.aspects = Aspect::Color;
internalFormat.type = type;
internalFormat.blockByteSize = byteSize;
internalFormat.blockWidth = 1;
@@ -129,7 +201,7 @@ namespace dawn_native {
AddFormat(internalFormat);
};
- auto AddDepthStencilFormat = [&AddFormat](wgpu::TextureFormat format, Format::Aspect aspect,
+ auto AddDepthStencilFormat = [&AddFormat](wgpu::TextureFormat format, Aspect aspects,
uint32_t byteSize) {
Format internalFormat;
internalFormat.format = format;
@@ -137,7 +209,7 @@ namespace dawn_native {
internalFormat.isCompressed = false;
internalFormat.isSupported = true;
internalFormat.supportsStorageUsage = false;
- internalFormat.aspect = aspect;
+ internalFormat.aspects = aspects;
internalFormat.type = Type::Other;
internalFormat.blockByteSize = byteSize;
internalFormat.blockWidth = 1;
@@ -153,7 +225,7 @@ namespace dawn_native {
internalFormat.isCompressed = false;
internalFormat.isSupported = true;
internalFormat.supportsStorageUsage = false;
- internalFormat.aspect = Aspect::Depth;
+ internalFormat.aspects = Aspect::Depth;
internalFormat.type = type;
internalFormat.blockByteSize = byteSize;
internalFormat.blockWidth = 1;
@@ -169,7 +241,7 @@ namespace dawn_native {
internalFormat.isCompressed = true;
internalFormat.isSupported = isSupported;
internalFormat.supportsStorageUsage = false;
- internalFormat.aspect = Aspect::Color;
+ internalFormat.aspects = Aspect::Color;
internalFormat.type = Type::Float;
internalFormat.blockByteSize = byteSize;
internalFormat.blockWidth = width;
@@ -210,7 +282,8 @@ namespace dawn_native {
AddColorFormat(wgpu::TextureFormat::BGRA8UnormSrgb, true, false, 4, Type::Float);
AddColorFormat(wgpu::TextureFormat::RGB10A2Unorm, true, false, 4, Type::Float);
- AddColorFormat(wgpu::TextureFormat::RG11B10Float, false, false, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RG11B10Ufloat, false, false, 4, Type::Float);
+ AddColorFormat(wgpu::TextureFormat::RGB9E5Ufloat, false, false, 4, Type::Float);
// 8 bytes color formats
AddColorFormat(wgpu::TextureFormat::RG32Uint, true, true, 8, Type::Uint);
@@ -232,7 +305,8 @@ namespace dawn_native {
AddDepthStencilFormat(wgpu::TextureFormat::Depth24Plus, Aspect::Depth, 4);
// TODO(cwallez@chromium.org): It isn't clear if this format should be copyable
// because its size isn't well defined, is it 4, 5 or 8?
- AddDepthStencilFormat(wgpu::TextureFormat::Depth24PlusStencil8, Aspect::DepthStencil, 4);
+ AddDepthStencilFormat(wgpu::TextureFormat::Depth24PlusStencil8,
+ Aspect::Depth | Aspect::Stencil, 4);
// BC compressed formats
bool isBCFormatSupported = device->IsExtensionEnabled(Extension::TextureCompressionBC);
diff --git a/chromium/third_party/dawn/src/dawn_native/Format.h b/chromium/third_party/dawn/src/dawn_native/Format.h
index 82b40d81cce..15c6c311a32 100644
--- a/chromium/third_party/dawn/src/dawn_native/Format.h
+++ b/chromium/third_party/dawn/src/dawn_native/Format.h
@@ -17,28 +17,31 @@
#include "dawn_native/dawn_platform.h"
+#include "common/ityp_bitset.h"
#include "dawn_native/Error.h"
+#include "dawn_native/EnumClassBitmasks.h"
+
#include <array>
namespace dawn_native {
+ enum class Aspect : uint8_t;
class DeviceBase;
+ struct TexelBlockInfo {
+ uint32_t blockByteSize;
+ uint32_t blockWidth;
+ uint32_t blockHeight;
+ };
+
// The number of formats Dawn knows about. Asserts in BuildFormatTable ensure that this is the
    // exact number of known formats.
- static constexpr size_t kKnownFormatCount = 52;
+ static constexpr size_t kKnownFormatCount = 53;
// A wgpu::TextureFormat along with all the information about it necessary for validation.
- struct Format {
- enum Aspect {
- Color,
- Depth,
- Stencil,
- DepthStencil,
- };
-
- enum Type {
+ struct Format : TexelBlockInfo {
+ enum class Type {
Float,
Sint,
Uint,
@@ -51,12 +54,8 @@ namespace dawn_native {
// A format can be known but not supported because it is part of a disabled extension.
bool isSupported;
bool supportsStorageUsage;
- Aspect aspect;
Type type;
-
- uint32_t blockByteSize;
- uint32_t blockWidth;
- uint32_t blockHeight;
+ Aspect aspects;
static Type TextureComponentTypeToFormatType(wgpu::TextureComponentType componentType);
static wgpu::TextureComponentType FormatTypeToTextureComponentType(Type type);
@@ -67,6 +66,9 @@ namespace dawn_native {
bool HasDepthOrStencil() const;
bool HasComponentType(Type componentType) const;
+ TexelBlockInfo GetTexelBlockInfo(wgpu::TextureAspect aspect) const;
+ TexelBlockInfo GetTexelBlockInfo(Aspect aspect) const;
+
// The index of the format in the list of all known formats: a unique number for each format
// in [0, kKnownFormatCount)
size_t GetIndex() const;
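
TexelBlockInfo is what feeds ComputeRequiredBytesInCopy in CommandValidation.h. A hedged sketch of the usual tightly-packed size computation such a helper performs (overflow checks elided; every row except the last row of the last image pays the full bytesPerRow stride):

    #include <cstdint>

    uint64_t RequiredBytesInCopy(uint32_t widthInBlocks,
                                 uint32_t heightInBlocks,
                                 uint32_t depth,
                                 uint32_t blockByteSize,
                                 uint32_t bytesPerRow,
                                 uint32_t rowsPerImage) {
        if (widthInBlocks == 0 || heightInBlocks == 0 || depth == 0) {
            return 0;
        }
        uint64_t bytesPerImage = uint64_t(bytesPerRow) * rowsPerImage;
        uint64_t lastRowBytes = uint64_t(widthInBlocks) * blockByteSize;
        return bytesPerImage * (depth - 1) +
               uint64_t(bytesPerRow) * (heightInBlocks - 1) + lastRowBytes;
    }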
diff --git a/chromium/third_party/dawn/src/dawn_native/Instance.cpp b/chromium/third_party/dawn/src/dawn_native/Instance.cpp
index c7a1e310f30..49db4b60fc5 100644
--- a/chromium/third_party/dawn/src/dawn_native/Instance.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Instance.cpp
@@ -145,7 +145,7 @@ namespace dawn_native {
# if defined(DAWN_ENABLE_SWIFTSHADER)
Register(vulkan::Connect(this, true), wgpu::BackendType::Vulkan);
# endif // defined(DAWN_ENABLE_SWIFTSHADER)
-#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
+#endif // defined(DAWN_ENABLE_BACKEND_VULKAN)
#if defined(DAWN_ENABLE_BACKEND_OPENGL)
Register(opengl::Connect(this), wgpu::BackendType::OpenGL);
#endif // defined(DAWN_ENABLE_BACKEND_OPENGL)
@@ -202,6 +202,14 @@ namespace dawn_native {
return mEnableBackendValidation;
}
+ void InstanceBase::EnableGPUBasedBackendValidation(bool enableGPUBasedBackendValidation) {
+ mEnableGPUValidation = enableGPUBasedBackendValidation;
+ }
+
+ bool InstanceBase::IsGPUBasedBackendValidationEnabled() const {
+ return mEnableGPUValidation;
+ }
+
void InstanceBase::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
mBeginCaptureOnStartup = beginCaptureOnStartup;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Instance.h b/chromium/third_party/dawn/src/dawn_native/Instance.h
index 0ade98b4e60..14d4eef2ff1 100644
--- a/chromium/third_party/dawn/src/dawn_native/Instance.h
+++ b/chromium/third_party/dawn/src/dawn_native/Instance.h
@@ -60,6 +60,9 @@ namespace dawn_native {
void EnableBackendValidation(bool enableBackendValidation);
bool IsBackendValidationEnabled() const;
+ void EnableGPUBasedBackendValidation(bool enableGPUBasedBackendValidation);
+ bool IsGPUBasedBackendValidationEnabled() const;
+
void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
bool IsBeginCaptureOnStartupEnabled() const;
@@ -88,6 +91,7 @@ namespace dawn_native {
bool mEnableBackendValidation = false;
bool mBeginCaptureOnStartup = false;
+ bool mEnableGPUValidation = false;
dawn_platform::Platform* mPlatform = nullptr;
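
A hedged usage sketch for the new instance-level switch, assuming the dawn_native::Instance wrapper from DawnNative.h shown earlier in this patch (adapter discovery is illustrative):

    #include "dawn_native/DawnNative.h"

    void SetUpDebugInstance(dawn_native::Instance* instance) {
        instance->EnableBackendValidation(true);          // backend debug layers
        instance->EnableGPUBasedBackendValidation(true);  // new GPU-assisted checks
        instance->DiscoverDefaultAdapters();
    }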
diff --git a/chromium/third_party/dawn/src/dawn_native/MapRequestTracker.cpp b/chromium/third_party/dawn/src/dawn_native/MapRequestTracker.cpp
index 8f33e023110..91af10bcece 100644
--- a/chromium/third_party/dawn/src/dawn_native/MapRequestTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/MapRequestTracker.cpp
@@ -27,11 +27,11 @@ namespace dawn_native {
ASSERT(mInflightRequests.Empty());
}
- void MapRequestTracker::Track(BufferBase* buffer, uint32_t mapSerial, bool isWrite) {
+ void MapRequestTracker::Track(BufferBase* buffer, uint32_t mapSerial, MapType type) {
Request request;
request.buffer = buffer;
request.mapSerial = mapSerial;
- request.isWrite = isWrite;
+ request.type = type;
mInflightRequests.Enqueue(std::move(request), mDevice->GetPendingCommandSerial());
mDevice->AddFutureCallbackSerial(mDevice->GetPendingCommandSerial());
@@ -39,8 +39,8 @@ namespace dawn_native {
void MapRequestTracker::Tick(Serial finishedSerial) {
for (auto& request : mInflightRequests.IterateUpTo(finishedSerial)) {
- request.buffer->OnMapCommandSerialFinished(request.mapSerial, request.isWrite);
+ request.buffer->OnMapCommandSerialFinished(request.mapSerial, request.type);
}
mInflightRequests.ClearUpTo(finishedSerial);
}
-} // namespace dawn_native \ No newline at end of file
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/MapRequestTracker.h b/chromium/third_party/dawn/src/dawn_native/MapRequestTracker.h
index 0dffca18f3d..1daf47dad92 100644
--- a/chromium/third_party/dawn/src/dawn_native/MapRequestTracker.h
+++ b/chromium/third_party/dawn/src/dawn_native/MapRequestTracker.h
@@ -20,12 +20,15 @@
namespace dawn_native {
+ // TODO(dawn:22) remove this enum once MapReadAsync/MapWriteAsync are removed.
+ enum class MapType : uint32_t { Read, Write, Async };
+
class MapRequestTracker {
public:
MapRequestTracker(DeviceBase* device);
~MapRequestTracker();
- void Track(BufferBase* buffer, uint32_t mapSerial, bool isWrite);
+ void Track(BufferBase* buffer, uint32_t mapSerial, MapType type);
void Tick(Serial finishedSerial);
private:
@@ -34,11 +37,11 @@ namespace dawn_native {
struct Request {
Ref<BufferBase> buffer;
uint32_t mapSerial;
- bool isWrite;
+ MapType type;
};
SerialQueue<Request> mInflightRequests;
};
} // namespace dawn_native
-#endif // DAWNNATIVE_MAPREQUESTTRACKER_H \ No newline at end of file
+#endif // DAWNNATIVE_MAPREQUESTTRACKER_H
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h b/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
index 9271114bc58..60dcd219a6d 100644
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
+++ b/chromium/third_party/dawn/src/dawn_native/PassResourceUsage.h
@@ -23,6 +23,7 @@
namespace dawn_native {
class BufferBase;
+ class QuerySetBase;
class TextureBase;
enum class PassType { Render, Compute };
@@ -45,8 +46,8 @@ namespace dawn_native {
// the vector to record every single subresource's Usages. The texture usage is enough. And we
// can decompress texture usage to a vector if necessary.
struct PassTextureUsage {
- wgpu::TextureUsage usage;
- bool sameUsagesAcrossSubresources;
+ wgpu::TextureUsage usage = wgpu::TextureUsage::None;
+ bool sameUsagesAcrossSubresources = true;
std::vector<wgpu::TextureUsage> subresourceUsages;
};
@@ -68,6 +69,7 @@ namespace dawn_native {
PerPassUsages perPass;
std::set<BufferBase*> topLevelBuffers;
std::set<TextureBase*> topLevelTextures;
+ std::set<QuerySetBase*> usedQuerySets;
};
} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
index f5e4a5664e0..3f5a9159dd8 100644
--- a/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/PassResourceUsageTracker.cpp
@@ -15,6 +15,8 @@
#include "dawn_native/PassResourceUsageTracker.h"
#include "dawn_native/Buffer.h"
+#include "dawn_native/EnumMaskIterator.h"
+#include "dawn_native/Format.h"
#include "dawn_native/Texture.h"
namespace dawn_native {
@@ -30,10 +32,7 @@ namespace dawn_native {
void PassResourceUsageTracker::TextureViewUsedAs(TextureViewBase* view,
wgpu::TextureUsage usage) {
TextureBase* texture = view->GetTexture();
- uint32_t baseMipLevel = view->GetBaseMipLevel();
- uint32_t levelCount = view->GetLevelCount();
- uint32_t baseArrayLayer = view->GetBaseArrayLayer();
- uint32_t layerCount = view->GetLayerCount();
+ const SubresourceRange& range = view->GetSubresourceRange();
// std::map's operator[] will create the key and return a PassTextureUsage with usage = 0
// and an empty vector for subresourceUsages.
@@ -42,20 +41,25 @@ namespace dawn_native {
// Set parameters for the whole texture
textureUsage.usage |= usage;
- uint32_t subresourceCount = texture->GetSubresourceCount();
- textureUsage.sameUsagesAcrossSubresources = levelCount * layerCount == subresourceCount;
+ textureUsage.sameUsagesAcrossSubresources &=
+ (range.levelCount == texture->GetNumMipLevels() && //
+ range.layerCount == texture->GetArrayLayers() && //
+ range.aspects == texture->GetFormat().aspects);
// Set usages for subresources
if (!textureUsage.subresourceUsages.size()) {
- textureUsage.subresourceUsages =
- std::vector<wgpu::TextureUsage>(subresourceCount, wgpu::TextureUsage::None);
+ textureUsage.subresourceUsages = std::vector<wgpu::TextureUsage>(
+ texture->GetSubresourceCount(), wgpu::TextureUsage::None);
}
- for (uint32_t arrayLayer = baseArrayLayer; arrayLayer < baseArrayLayer + layerCount;
- ++arrayLayer) {
- for (uint32_t mipLevel = baseMipLevel; mipLevel < baseMipLevel + levelCount;
- ++mipLevel) {
- uint32_t subresourceIndex = texture->GetSubresourceIndex(mipLevel, arrayLayer);
- textureUsage.subresourceUsages[subresourceIndex] |= usage;
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+ for (uint32_t mipLevel = range.baseMipLevel;
+ mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
+ uint32_t subresourceIndex =
+ texture->GetSubresourceIndex(mipLevel, arrayLayer, aspect);
+ textureUsage.subresourceUsages[subresourceIndex] |= usage;
+ }
}
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
index df344416a56..ae05c02f4b4 100644
--- a/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Pipeline.cpp
@@ -91,25 +91,13 @@ namespace dawn_native {
}
BindGroupIndex groupIndex(groupIndexIn);
+
+ BindGroupLayoutBase* bgl = nullptr;
if (!mLayout->GetBindGroupLayoutsMask()[groupIndex]) {
- // Get or create an empty bind group layout.
- // TODO(enga): Consider caching this object on the Device and reusing it.
- // Today, this can't be done correctly because of the order of Device destruction.
- // For example, vulkan::~Device will be called before ~DeviceBase. If DeviceBase owns
- // a Ref<BindGroupLayoutBase>, then the VkDevice will be destroyed before the
- // VkDescriptorSetLayout.
- BindGroupLayoutDescriptor desc = {};
- desc.entryCount = 0;
- desc.entries = nullptr;
-
- BindGroupLayoutBase* bgl = nullptr;
- if (GetDevice()->ConsumedError(GetDevice()->GetOrCreateBindGroupLayout(&desc), &bgl)) {
- return BindGroupLayoutBase::MakeError(GetDevice());
- }
- return bgl;
+ bgl = GetDevice()->GetEmptyBindGroupLayout();
+ } else {
+ bgl = mLayout->GetBindGroupLayout(groupIndex);
}
-
- BindGroupLayoutBase* bgl = mLayout->GetBindGroupLayout(groupIndex);
bgl->Reference();
return bgl;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
index def4875c445..f34003a5b7a 100644
--- a/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/PipelineLayout.cpp
@@ -17,6 +17,7 @@
#include "common/Assert.h"
#include "common/BitSetIterator.h"
#include "common/HashUtils.h"
+#include "common/ityp_stack_vec.h"
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/Device.h"
#include "dawn_native/ShaderModule.h"
@@ -70,24 +71,14 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("too many bind group layouts");
}
- uint32_t totalDynamicUniformBufferCount = 0;
- uint32_t totalDynamicStorageBufferCount = 0;
+ BindingCounts bindingCounts = {};
for (uint32_t i = 0; i < descriptor->bindGroupLayoutCount; ++i) {
DAWN_TRY(device->ValidateObject(descriptor->bindGroupLayouts[i]));
- totalDynamicUniformBufferCount +=
- descriptor->bindGroupLayouts[i]->GetDynamicUniformBufferCount();
- totalDynamicStorageBufferCount +=
- descriptor->bindGroupLayouts[i]->GetDynamicStorageBufferCount();
- }
-
- if (totalDynamicUniformBufferCount > kMaxDynamicUniformBufferCount) {
- return DAWN_VALIDATION_ERROR("too many dynamic uniform buffers in pipeline layout");
- }
-
- if (totalDynamicStorageBufferCount > kMaxDynamicStorageBufferCount) {
- return DAWN_VALIDATION_ERROR("too many dynamic storage buffers in pipeline layout");
+ AccumulateBindingCounts(&bindingCounts,
+ descriptor->bindGroupLayouts[i]->GetBindingCountInfo());
}
+ DAWN_TRY(ValidateBindingCounts(bindingCounts));
return {};
}
@@ -128,9 +119,10 @@ namespace dawn_native {
ASSERT(count > 0);
// Data which BindGroupLayoutDescriptor will point to for creation
- ityp::array<BindGroupIndex,
- ityp::array<BindingIndex, BindGroupLayoutEntry, kMaxBindingsPerGroup>,
- kMaxBindGroups>
+ ityp::array<
+ BindGroupIndex,
+ ityp::stack_vec<BindingIndex, BindGroupLayoutEntry, kMaxOptimalBindingsPerGroup>,
+ kMaxBindGroups>
entryData = {};
// A map of bindings to the index in |entryData|
@@ -140,6 +132,7 @@ namespace dawn_native {
// A counter of how many bindings we've populated in |entryData|
ityp::array<BindGroupIndex, BindingIndex, kMaxBindGroups> entryCounts = {};
+ BindingCounts bindingCounts = {};
BindGroupIndex bindGroupLayoutCount(0);
for (uint32_t moduleIndex = 0; moduleIndex < count; ++moduleIndex) {
const ShaderModuleBase* module = modules[moduleIndex];
@@ -201,7 +194,9 @@ namespace dawn_native {
}
}
+ IncrementBindingCounts(&bindingCounts, bindingSlot);
BindingIndex currentBindingCount = entryCounts[group];
+ entryData[group].resize(currentBindingCount + BindingIndex(1));
entryData[group][currentBindingCount] = bindingSlot;
usedBindingsMap[group][bindingNumber] = currentBindingCount;
@@ -214,6 +209,8 @@ namespace dawn_native {
}
}
+ DAWN_TRY(ValidateBindingCounts(bindingCounts));
+
ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bindGroupLayouts = {};
for (BindGroupIndex group(0); group < bindGroupLayoutCount; ++group) {
BindGroupLayoutDescriptor desc = {};
@@ -222,7 +219,10 @@ namespace dawn_native {
// We should never produce a bad descriptor.
ASSERT(!ValidateBindGroupLayoutDescriptor(device, &desc).IsError());
- DAWN_TRY_ASSIGN(bindGroupLayouts[group], device->GetOrCreateBindGroupLayout(&desc));
+
+ Ref<BindGroupLayoutBase> bgl;
+ DAWN_TRY_ASSIGN(bgl, device->GetOrCreateBindGroupLayout(&desc));
+ bindGroupLayouts[group] = bgl.Detach();
}
PipelineLayoutDescriptor desc = {};
diff --git a/chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.cpp
new file mode 100644
index 00000000000..5ba502e8fbf
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.cpp
@@ -0,0 +1,60 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/PooledResourceMemoryAllocator.h"
+#include "dawn_native/Device.h"
+
+namespace dawn_native {
+
+ PooledResourceMemoryAllocator::PooledResourceMemoryAllocator(
+ ResourceHeapAllocator* heapAllocator)
+ : mHeapAllocator(heapAllocator) {
+ }
+
+ void PooledResourceMemoryAllocator::DestroyPool() {
+ for (auto& resourceHeap : mPool) {
+ ASSERT(resourceHeap != nullptr);
+ mHeapAllocator->DeallocateResourceHeap(std::move(resourceHeap));
+ }
+
+ mPool.clear();
+ }
+
+ ResultOrError<std::unique_ptr<ResourceHeapBase>>
+ PooledResourceMemoryAllocator::AllocateResourceHeap(uint64_t size) {
+ // Pooled memory is LIFO because memory can be evicted by LRU. However, this means
+ // pooling is disabled in-frame when the memory is still pending. For high in-frame
+ // memory users, FIFO might be preferable when memory consumption is a higher priority.
+ std::unique_ptr<ResourceHeapBase> memory;
+ if (!mPool.empty()) {
+ memory = std::move(mPool.front());
+ mPool.pop_front();
+ }
+
+ if (memory == nullptr) {
+ DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(size));
+ }
+
+ return std::move(memory);
+ }
+
+ void PooledResourceMemoryAllocator::DeallocateResourceHeap(
+ std::unique_ptr<ResourceHeapBase> allocation) {
+ mPool.push_front(std::move(allocation));
+ }
+
+ uint64_t PooledResourceMemoryAllocator::GetPoolSizeForTesting() const {
+ return mPool.size();
+ }
+} // namespace dawn_native \ No newline at end of file
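
The recycling behavior in AllocateResourceHeap above is LIFO: the most recently deallocated heap is the first one handed back out, keeping hot heaps warm. A hedged usage sketch using the types declared in the header below (kHeapSize and backendHeapAllocator are placeholders):

    PooledResourceMemoryAllocator pool(backendHeapAllocator);
    std::unique_ptr<ResourceHeapBase> heap;
    DAWN_TRY_ASSIGN(heap, pool.AllocateResourceHeap(kHeapSize));  // miss: backend allocates
    pool.DeallocateResourceHeap(std::move(heap));                 // heap returns to the pool
    DAWN_TRY_ASSIGN(heap, pool.AllocateResourceHeap(kHeapSize));  // hit: same heap, recycled
    pool.DestroyPool();  // must run before the backend allocator is destroyed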
diff --git a/chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.h b/chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.h
new file mode 100644
index 00000000000..5b6b816ee6a
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/PooledResourceMemoryAllocator.h
@@ -0,0 +1,53 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_POOLEDRESOURCEMEMORYALLOCATOR_H_
+#define DAWNNATIVE_POOLEDRESOURCEMEMORYALLOCATOR_H_
+
+#include "common/SerialQueue.h"
+#include "dawn_native/ResourceHeapAllocator.h"
+
+#include <deque>
+
+namespace dawn_native {
+
+ class DeviceBase;
+
+    // |PooledResourceMemoryAllocator| allocates fixed-size resource memory from a resource
+    // memory pool. Internally, it manages a list of heaps using LIFO (newest heaps are
+    // recycled first). A heap is in one of two states: AVAILABLE or not. Upon deallocation,
+    // the heap is returned to the pool and made AVAILABLE.
+ class PooledResourceMemoryAllocator : public ResourceHeapAllocator {
+ public:
+ PooledResourceMemoryAllocator(ResourceHeapAllocator* heapAllocator);
+ ~PooledResourceMemoryAllocator() override = default;
+
+ ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
+ uint64_t size) override;
+ void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
+
+ void DestroyPool();
+
+ // For testing purposes.
+ uint64_t GetPoolSizeForTesting() const;
+
+ private:
+ ResourceHeapAllocator* mHeapAllocator = nullptr;
+
+ std::deque<std::unique_ptr<ResourceHeapBase>> mPool;
+ };
+
+} // namespace dawn_native
+
+#endif // DAWNNATIVE_POOLEDRESOURCEMEMORYALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp b/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp
index 513658db4c7..bd65ed05baa 100644
--- a/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/QuerySet.cpp
@@ -65,11 +65,11 @@ namespace dawn_native {
"PipelineStatistics");
}
- std::set<wgpu::PipelineStatisticsName> pipelineStatisticsSet;
+ std::set<wgpu::PipelineStatisticName> pipelineStatisticsSet;
for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
- DAWN_TRY(ValidatePipelineStatisticsName(descriptor->pipelineStatistics[i]));
+ DAWN_TRY(ValidatePipelineStatisticName(descriptor->pipelineStatistics[i]));
- std::pair<std::set<wgpu::PipelineStatisticsName>::iterator, bool> res =
+ std::pair<std::set<wgpu::PipelineStatisticName>::iterator, bool> res =
pipelineStatisticsSet.insert((descriptor->pipelineStatistics[i]));
if (!res.second) {
return DAWN_VALIDATION_ERROR("Duplicate pipeline statistics found");
@@ -127,10 +127,18 @@ namespace dawn_native {
return mQueryCount;
}
- const std::vector<wgpu::PipelineStatisticsName>& QuerySetBase::GetPipelineStatistics() const {
+ const std::vector<wgpu::PipelineStatisticName>& QuerySetBase::GetPipelineStatistics() const {
return mPipelineStatistics;
}
+ MaybeError QuerySetBase::ValidateCanUseInSubmitNow() const {
+ ASSERT(!IsError());
+ if (mState == QuerySetState::Destroyed) {
+ return DAWN_VALIDATION_ERROR("Destroyed query set used in a submit");
+ }
+ return {};
+ }
+
void QuerySetBase::Destroy() {
if (GetDevice()->ConsumedError(ValidateDestroy())) {
return;
diff --git a/chromium/third_party/dawn/src/dawn_native/QuerySet.h b/chromium/third_party/dawn/src/dawn_native/QuerySet.h
index 7883678ffb0..a8f4deace6a 100644
--- a/chromium/third_party/dawn/src/dawn_native/QuerySet.h
+++ b/chromium/third_party/dawn/src/dawn_native/QuerySet.h
@@ -33,7 +33,9 @@ namespace dawn_native {
wgpu::QueryType GetQueryType() const;
uint32_t GetQueryCount() const;
- const std::vector<wgpu::PipelineStatisticsName>& GetPipelineStatistics() const;
+ const std::vector<wgpu::PipelineStatisticName>& GetPipelineStatistics() const;
+
+ MaybeError ValidateCanUseInSubmitNow() const;
void Destroy();
@@ -50,7 +52,7 @@ namespace dawn_native {
wgpu::QueryType mQueryType;
uint32_t mQueryCount;
- std::vector<wgpu::PipelineStatisticsName> mPipelineStatistics;
+ std::vector<wgpu::PipelineStatisticName> mPipelineStatistics;
enum class QuerySetState { Unavailable, Available, Destroyed };
QuerySetState mState = QuerySetState::Unavailable;
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.cpp b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
index 3dcf2b0e469..eb0b053e7d3 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.cpp
@@ -14,14 +14,17 @@
#include "dawn_native/Queue.h"
+#include "common/Constants.h"
#include "dawn_native/Buffer.h"
#include "dawn_native/CommandBuffer.h"
+#include "dawn_native/CommandValidation.h"
#include "dawn_native/Device.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ErrorScope.h"
#include "dawn_native/ErrorScopeTracker.h"
#include "dawn_native/Fence.h"
#include "dawn_native/FenceSignalTracker.h"
+#include "dawn_native/QuerySet.h"
#include "dawn_native/Texture.h"
#include "dawn_platform/DawnPlatform.h"
#include "dawn_platform/tracing/TraceEvent.h"
@@ -49,24 +52,11 @@ namespace dawn_native {
}
void QueueBase::Submit(uint32_t commandCount, CommandBufferBase* const* commands) {
- DeviceBase* device = GetDevice();
- if (device->ConsumedError(device->ValidateIsAlive())) {
- // If device is lost, don't let any commands be submitted
- return;
- }
+ SubmitInternal(commandCount, commands);
- TRACE_EVENT0(device->GetPlatform(), General, "Queue::Submit");
- if (device->IsValidationEnabled() &&
- device->ConsumedError(ValidateSubmit(commandCount, commands))) {
- return;
- }
- ASSERT(!IsError());
-
- if (device->ConsumedError(SubmitImpl(commandCount, commands))) {
- return;
+ for (uint32_t i = 0; i < commandCount; ++i) {
+ commands[i]->Destroy();
}
- device->GetErrorScopeTracker()->TrackUntilLastSubmitComplete(
- device->GetCurrentErrorScope());
}
void QueueBase::Signal(Fence* fence, uint64_t signalValue) {
@@ -121,7 +111,8 @@ namespace dawn_native {
UploadHandle uploadHandle;
DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
- size, device->GetPendingCommandSerial()));
+ size, device->GetPendingCommandSerial(),
+ kCopyBufferToBufferOffsetAlignment));
ASSERT(uploadHandle.mappedBuffer != nullptr);
memcpy(uploadHandle.mappedBuffer, data, size);
@@ -130,6 +121,37 @@ namespace dawn_native {
buffer, bufferOffset, size);
}
+ void QueueBase::WriteTexture(const TextureCopyView* destination,
+ const void* data,
+ size_t dataSize,
+ const TextureDataLayout* dataLayout,
+ const Extent3D* writeSize) {
+ GetDevice()->ConsumedError(
+ WriteTextureInternal(destination, data, dataSize, dataLayout, writeSize));
+ }
+
+ MaybeError QueueBase::WriteTextureInternal(const TextureCopyView* destination,
+ const void* data,
+ size_t dataSize,
+ const TextureDataLayout* dataLayout,
+ const Extent3D* writeSize) {
+ DAWN_TRY(ValidateWriteTexture(destination, dataSize, dataLayout, writeSize));
+
+ if (writeSize->width == 0 || writeSize->height == 0 || writeSize->depth == 0) {
+ return {};
+ }
+
+ return WriteTextureImpl(*destination, data, *dataLayout, *writeSize);
+ }
+
+ MaybeError QueueBase::WriteTextureImpl(const TextureCopyView& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& writeSize) {
+ // TODO(tommek@google.com): This should be implemented.
+ return {};
+ }
+
MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
CommandBufferBase* const* commands) const {
TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "Queue::ValidateSubmit");
@@ -137,6 +159,7 @@ namespace dawn_native {
for (uint32_t i = 0; i < commandCount; ++i) {
DAWN_TRY(GetDevice()->ValidateObject(commands[i]));
+ DAWN_TRY(commands[i]->ValidateCanUseInSubmitNow());
const CommandBufferResourceUsage& usages = commands[i]->GetResourceUsages();
@@ -155,6 +178,9 @@ namespace dawn_native {
for (const TextureBase* texture : usages.topLevelTextures) {
DAWN_TRY(texture->ValidateCanUseInSubmitNow());
}
+ for (const QuerySetBase* querySet : usages.usedQuerySets) {
+ DAWN_TRY(querySet->ValidateCanUseInSubmitNow());
+ }
}
return {};
@@ -208,7 +234,102 @@ namespace dawn_native {
return DAWN_VALIDATION_ERROR("Buffer needs the CopyDst usage bit");
}
- return buffer->ValidateCanUseOnQueueNow();
+ DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
+
+ return {};
}
+ MaybeError QueueBase::ValidateWriteTexture(const TextureCopyView* destination,
+ size_t dataSize,
+ const TextureDataLayout* dataLayout,
+ const Extent3D* writeSize) const {
+ DAWN_TRY(GetDevice()->ValidateIsAlive());
+ DAWN_TRY(GetDevice()->ValidateObject(this));
+ DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
+
+ DAWN_TRY(ValidateTextureCopyView(GetDevice(), *destination));
+
+ if (dataLayout->offset > dataSize) {
+ return DAWN_VALIDATION_ERROR("Queue::WriteTexture out of range");
+ }
+
+ if (!(destination->texture->GetUsage() & wgpu::TextureUsage::CopyDst)) {
+ return DAWN_VALIDATION_ERROR("Texture needs the CopyDst usage bit");
+ }
+
+ if (destination->texture->GetSampleCount() > 1) {
+ return DAWN_VALIDATION_ERROR("The sample count of textures must be 1");
+ }
+
+        // We validate the texture copy range before validating the linear texture data,
+        // because the latter divides copyExtent.width by blockWidth and
+        // copyExtent.height by blockHeight, and those divisibility conditions are
+        // checked while validating the texture copy range.
+ DAWN_TRY(ValidateTextureCopyRange(*destination, *writeSize));
+ DAWN_TRY(ValidateBufferToTextureCopyRestrictions(*destination));
+ DAWN_TRY(ValidateLinearTextureData(
+ *dataLayout, dataSize,
+ destination->texture->GetFormat().GetTexelBlockInfo(destination->aspect), *writeSize));
+
+ DAWN_TRY(destination->texture->ValidateCanUseInSubmitNow());
+
+ return {};
+ }
+
+ void QueueBase::SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands) {
+ DeviceBase* device = GetDevice();
+ if (device->ConsumedError(device->ValidateIsAlive())) {
+ // If device is lost, don't let any commands be submitted
+ return;
+ }
+
+ TRACE_EVENT0(device->GetPlatform(), General, "Queue::Submit");
+ if (device->IsValidationEnabled() &&
+ device->ConsumedError(ValidateSubmit(commandCount, commands))) {
+ return;
+ }
+ ASSERT(!IsError());
+
+ if (device->ConsumedError(SubmitImpl(commandCount, commands))) {
+ return;
+ }
+
+ device->GetErrorScopeTracker()->TrackUntilLastSubmitComplete(
+ device->GetCurrentErrorScope());
+ }
+
+ void CopyTextureData(uint8_t* dstPointer,
+ const uint8_t* srcPointer,
+ uint32_t depth,
+ uint32_t rowsPerImageInBlock,
+ uint64_t imageAdditionalStride,
+ uint32_t actualBytesPerRow,
+ uint32_t dstBytesPerRow,
+ uint32_t srcBytesPerRow) {
+ bool copyWholeLayer =
+ actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
+ bool copyWholeData = copyWholeLayer && imageAdditionalStride == 0;
+
+ if (!copyWholeLayer) { // copy row by row
+ for (uint32_t d = 0; d < depth; ++d) {
+ for (uint32_t h = 0; h < rowsPerImageInBlock; ++h) {
+ memcpy(dstPointer, srcPointer, actualBytesPerRow);
+ dstPointer += dstBytesPerRow;
+ srcPointer += srcBytesPerRow;
+ }
+ srcPointer += imageAdditionalStride;
+ }
+ } else {
+ uint64_t layerSize = uint64_t(rowsPerImageInBlock) * actualBytesPerRow;
+ if (!copyWholeData) { // copy layer by layer
+ for (uint32_t d = 0; d < depth; ++d) {
+ memcpy(dstPointer, srcPointer, layerSize);
+ dstPointer += layerSize;
+ srcPointer += layerSize + imageAdditionalStride;
+ }
+ } else { // do a single copy
+ memcpy(dstPointer, srcPointer, layerSize * depth);
+ }
+ }
+ }
} // namespace dawn_native
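
The CopyTextureData helper above escalates to the widest memcpy it can prove safe: row by row when row pitches differ, layer by layer when rows are packed but an extra stride separates images, and a single copy when the whole volume is contiguous. A minimal standalone sketch of the same three-tier strategy over plain byte buffers, with Dawn's texel-block bookkeeping assumed away:

    #include <cstdint>
    #include <cstring>

    // Hypothetical standalone version of the tiered copy; parameters mirror
    // the helper above, but nothing here depends on Dawn types.
    void TieredCopy(uint8_t* dst, const uint8_t* src,
                    uint32_t depth, uint32_t rowsPerImage,
                    uint64_t imageAdditionalStride,
                    uint32_t actualBytesPerRow,
                    uint32_t dstBytesPerRow, uint32_t srcBytesPerRow) {
        const bool packedRows =
            actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
        if (!packedRows) {
            // Row pitches differ: copy one row at a time, skipping the padding.
            for (uint32_t d = 0; d < depth; ++d) {
                for (uint32_t r = 0; r < rowsPerImage; ++r) {
                    std::memcpy(dst, src, actualBytesPerRow);
                    dst += dstBytesPerRow;
                    src += srcBytesPerRow;
                }
                src += imageAdditionalStride;
            }
        } else if (imageAdditionalStride != 0) {
            // Rows are packed but layers are not: copy one layer at a time.
            const uint64_t layerSize = uint64_t(rowsPerImage) * actualBytesPerRow;
            for (uint32_t d = 0; d < depth; ++d) {
                std::memcpy(dst, src, layerSize);
                dst += layerSize;
                src += layerSize + imageAdditionalStride;
            }
        } else {
            // Everything is contiguous: one memcpy covers the whole volume.
            std::memcpy(dst, src, uint64_t(rowsPerImage) * actualBytesPerRow * depth);
        }
    }

Each wider tier is gated on a stronger contiguity condition, so the copy never touches padding bytes it cannot account for.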
diff --git a/chromium/third_party/dawn/src/dawn_native/Queue.h b/chromium/third_party/dawn/src/dawn_native/Queue.h
index 5fd722ddf5e..be1ce62b573 100644
--- a/chromium/third_party/dawn/src/dawn_native/Queue.h
+++ b/chromium/third_party/dawn/src/dawn_native/Queue.h
@@ -34,6 +34,11 @@ namespace dawn_native {
void Signal(Fence* fence, uint64_t signalValue);
Fence* CreateFence(const FenceDescriptor* descriptor);
void WriteBuffer(BufferBase* buffer, uint64_t bufferOffset, const void* data, size_t size);
+ void WriteTexture(const TextureCopyView* destination,
+ const void* data,
+ size_t dataSize,
+ const TextureDataLayout* dataLayout,
+ const Extent3D* writeSize);
private:
QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag);
@@ -42,12 +47,21 @@ namespace dawn_native {
uint64_t bufferOffset,
const void* data,
size_t size);
+ MaybeError WriteTextureInternal(const TextureCopyView* destination,
+ const void* data,
+ size_t dataSize,
+ const TextureDataLayout* dataLayout,
+ const Extent3D* writeSize);
virtual MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands);
virtual MaybeError WriteBufferImpl(BufferBase* buffer,
uint64_t bufferOffset,
const void* data,
size_t size);
+ virtual MaybeError WriteTextureImpl(const TextureCopyView& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& writeSize);
MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands) const;
MaybeError ValidateSignal(const Fence* fence, uint64_t signalValue) const;
@@ -55,8 +69,24 @@ namespace dawn_native {
MaybeError ValidateWriteBuffer(const BufferBase* buffer,
uint64_t bufferOffset,
size_t size) const;
+ MaybeError ValidateWriteTexture(const TextureCopyView* destination,
+ size_t dataSize,
+ const TextureDataLayout* dataLayout,
+ const Extent3D* writeSize) const;
+
+ void SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands);
};
+ // A helper function used in Queue::WriteTexture. The destination data layout must not
+ // contain any additional rows per image.
+ void CopyTextureData(uint8_t* dstPointer,
+ const uint8_t* srcPointer,
+ uint32_t depth,
+ uint32_t rowsPerImageInBlock,
+ uint64_t imageAdditionalStride,
+ uint32_t actualBytesPerRow,
+ uint32_t dstBytesPerRow,
+ uint32_t srcBytesPerRow);
} // namespace dawn_native
#endif // DAWNNATIVE_QUEUE_H_
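
The header makes the error-handling layering of this change visible for both Submit and WriteTexture: the public entry point returns void and routes failures through the device's ConsumedError, the *Internal method returns MaybeError and owns validation, and the virtual *Impl method is the backend override point. A minimal sketch of that three-layer pattern, using a bare const char* as a stand-in for Dawn's MaybeError (an assumption made purely for brevity):

    #include <cstdio>

    // Stand-in for MaybeError: nullptr means success, otherwise the pointer
    // is the error message. (Sketch-only assumption.)
    using MaybeError = const char*;

    class QueueSketch {
      public:
        virtual ~QueueSketch() = default;

        // Public layer: mirrors QueueBase::WriteTexture, which returns void
        // and hands any MaybeError to the device via ConsumedError.
        void Write(int value) {
            if (MaybeError err = WriteInternal(value)) {
                ConsumeError(err);
            }
        }

      private:
        // Validation layer: early-outs before any work happens, like
        // WriteTextureInternal (which also skips empty writes).
        MaybeError WriteInternal(int value) {
            if (value < 0) {
                return "value out of range";
            }
            return WriteImpl(value);
        }

        // Backend layer: the virtual each backend overrides, like WriteTextureImpl.
        virtual MaybeError WriteImpl(int value) {
            std::printf("writing %d\n", value);
            return nullptr;
        }

        void ConsumeError(MaybeError err) {
            std::printf("device error: %s\n", err);
        }
    };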
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
index e2dc4db7fce..e51b52f7662 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.cpp
@@ -17,8 +17,10 @@
#include "common/Constants.h"
#include "dawn_native/Buffer.h"
#include "dawn_native/CommandEncoder.h"
+#include "dawn_native/CommandValidation.h"
#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
+#include "dawn_native/QuerySet.h"
#include "dawn_native/RenderBundle.h"
#include "dawn_native/RenderPipeline.h"
@@ -163,4 +165,21 @@ namespace dawn_native {
});
}
+ void RenderPassEncoder::WriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+ mEncodingContext->TryEncode(this, [&](CommandAllocator* allocator) -> MaybeError {
+ if (GetDevice()->IsValidationEnabled()) {
+ DAWN_TRY(GetDevice()->ValidateObject(querySet));
+ DAWN_TRY(ValidateTimestampQuery(querySet, queryIndex));
+ mCommandEncoder->TrackUsedQuerySet(querySet);
+ }
+
+ WriteTimestampCmd* cmd =
+ allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+ cmd->querySet = querySet;
+ cmd->queryIndex = queryIndex;
+
+ return {};
+ });
+ }
+
} // namespace dawn_native
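
WriteTimestamp above follows the encoder's standard recipe: validate only when validation is enabled, mark the query set as used on the parent CommandEncoder, then append a plain-old-data command through the CommandAllocator. A rough sketch of that record-into-an-arena pattern; Dawn's CommandAllocator is block-based and type-tagged, while this stand-in simply appends bytes to a vector:

    #include <cstddef>
    #include <cstdint>
    #include <type_traits>
    #include <vector>

    enum class Command : uint32_t { WriteTimestamp };

    // Shaped like the WriteTimestampCmd recorded above, with a plain id
    // in place of a QuerySetBase pointer.
    struct WriteTimestampCmdSketch {
        Command commandId;
        uint32_t querySet;
        uint32_t queryIndex;
    };

    class CommandList {
      public:
        // Append a trivially copyable command record.
        template <typename T>
        void Record(const T& cmd) {
            static_assert(std::is_trivially_copyable<T>::value, "POD commands only");
            const uint8_t* bytes = reinterpret_cast<const uint8_t*>(&cmd);
            mBytes.insert(mBytes.end(), bytes, bytes + sizeof(T));
        }

        size_t SizeInBytes() const { return mBytes.size(); }

      private:
        std::vector<uint8_t> mBytes;
    };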
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
index cd9ac017fb7..d5a2f7aebbc 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPassEncoder.h
@@ -46,6 +46,8 @@ namespace dawn_native {
void SetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height);
void ExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles);
+ void WriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+
protected:
RenderPassEncoder(DeviceBase* device,
CommandEncoder* commandEncoder,
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
index ceeb1eb2fc9..87c9f62e65e 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.cpp
@@ -366,12 +366,8 @@ namespace dawn_native {
DAWN_TRY(ValidateDepthStencilStateDescriptor(device, descriptor->depthStencilState));
}
- if (descriptor->sampleMask != 0xFFFFFFFF) {
- return DAWN_VALIDATION_ERROR("sampleMask must be 0xFFFFFFFF (for now)");
- }
-
- if (descriptor->alphaToCoverageEnabled) {
- return DAWN_VALIDATION_ERROR("alphaToCoverageEnabled isn't supported (yet)");
+ if (descriptor->alphaToCoverageEnabled && descriptor->sampleCount <= 1) {
+ return DAWN_VALIDATION_ERROR("Enabling alphaToCoverage requires sampleCount > 1");
}
return {};
@@ -571,6 +567,16 @@ namespace dawn_native {
return mAttachmentState->GetSampleCount();
}
+ uint32_t RenderPipelineBase::GetSampleMask() const {
+ ASSERT(!IsError());
+ return mSampleMask;
+ }
+
+ bool RenderPipelineBase::IsAlphaToCoverageEnabled() const {
+ ASSERT(!IsError());
+ return mAlphaToCoverageEnabled;
+ }
+
const AttachmentState* RenderPipelineBase::GetAttachmentState() const {
ASSERT(!IsError());
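
The hunk above relaxes multisample validation: an arbitrary sampleMask is now accepted, and alphaToCoverageEnabled is legal as long as the pipeline is multisampled. Restated as a self-contained check, with a hypothetical struct standing in for the relevant RenderPipelineDescriptor fields:

    #include <cstdint>

    // Hypothetical stand-in for the multisample-related descriptor fields.
    struct MultisampleStateSketch {
        uint32_t sampleCount = 1;
        uint32_t sampleMask = 0xFFFFFFFF;  // any mask is accepted now
        bool alphaToCoverageEnabled = false;
    };

    // Returns nullptr on success, otherwise the validation message.
    const char* ValidateMultisampleState(const MultisampleStateSketch& desc) {
        if (desc.alphaToCoverageEnabled && desc.sampleCount <= 1) {
            return "Enabling alphaToCoverage requires sampleCount > 1";
        }
        return nullptr;
    }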
diff --git a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h
index 9cc76dea82d..bdc3af0fd9a 100644
--- a/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h
+++ b/chromium/third_party/dawn/src/dawn_native/RenderPipeline.h
@@ -76,6 +76,8 @@ namespace dawn_native {
wgpu::TextureFormat GetColorAttachmentFormat(uint32_t attachment) const;
wgpu::TextureFormat GetDepthStencilFormat() const;
uint32_t GetSampleCount() const;
+ uint32_t GetSampleMask() const;
+ bool IsAlphaToCoverageEnabled() const;
const AttachmentState* GetAttachmentState() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/ResourceHeap.h b/chromium/third_party/dawn/src/dawn_native/ResourceHeap.h
index 6e6af795122..e9a4a672263 100644
--- a/chromium/third_party/dawn/src/dawn_native/ResourceHeap.h
+++ b/chromium/third_party/dawn/src/dawn_native/ResourceHeap.h
@@ -28,4 +28,4 @@ namespace dawn_native {
} // namespace dawn_native
-#endif // DAWNNATIVE_RESOURCEHEAP_H_ \ No newline at end of file
+#endif // DAWNNATIVE_RESOURCEHEAP_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.cpp b/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.cpp
index 0d726c9d66c..b1c35d41727 100644
--- a/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ResourceMemoryAllocation.cpp
@@ -50,4 +50,4 @@ namespace dawn_native {
mResourceHeap = nullptr;
mInfo = {};
}
-} // namespace dawn_native \ No newline at end of file
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h b/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h
index e437632d7c4..50b0f15d001 100644
--- a/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h
+++ b/chromium/third_party/dawn/src/dawn_native/RingBufferAllocator.h
@@ -56,4 +56,4 @@ namespace dawn_native {
};
} // namespace dawn_native
-#endif // DAWNNATIVE_RINGBUFFERALLOCATOR_H_ \ No newline at end of file
+#endif // DAWNNATIVE_RINGBUFFERALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
index 04b6dfc2079..6057e70d6eb 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.cpp
@@ -39,14 +39,14 @@ namespace dawn_native {
Format::Type SpirvCrossBaseTypeToFormatType(spirv_cross::SPIRType::BaseType spirvBaseType) {
switch (spirvBaseType) {
case spirv_cross::SPIRType::Float:
- return Format::Float;
+ return Format::Type::Float;
case spirv_cross::SPIRType::Int:
- return Format::Sint;
+ return Format::Type::Sint;
case spirv_cross::SPIRType::UInt:
- return Format::Uint;
+ return Format::Type::Uint;
default:
UNREACHABLE();
- return Format::Other;
+ return Format::Type::Other;
}
}
@@ -194,7 +194,7 @@ namespace dawn_native {
case spv::ImageFormatRgb10A2:
return wgpu::TextureFormat::RGB10A2Unorm;
case spv::ImageFormatR11fG11fB10f:
- return wgpu::TextureFormat::RG11B10Float;
+ return wgpu::TextureFormat::RG11B10Ufloat;
case spv::ImageFormatRg32f:
return wgpu::TextureFormat::RG32Float;
case spv::ImageFormatRg32ui:
@@ -265,7 +265,7 @@ namespace dawn_native {
case shaderc_spvc_storage_texture_format_rgb10a2unorm:
return wgpu::TextureFormat::RGB10A2Unorm;
case shaderc_spvc_storage_texture_format_rg11b10float:
- return wgpu::TextureFormat::RG11B10Float;
+ return wgpu::TextureFormat::RG11B10Ufloat;
case shaderc_spvc_storage_texture_format_rg32float:
return wgpu::TextureFormat::RG32Float;
case shaderc_spvc_storage_texture_format_rg32uint:
@@ -295,6 +295,82 @@ namespace dawn_native {
<< " binding " << static_cast<uint32_t>(binding);
return ostream.str();
}
+
+#ifdef DAWN_ENABLE_WGSL
+ tint::ast::transform::VertexFormat ToTintVertexFormat(wgpu::VertexFormat format) {
+ switch (format) {
+ case wgpu::VertexFormat::UChar2:
+ return tint::ast::transform::VertexFormat::kVec2U8;
+ case wgpu::VertexFormat::UChar4:
+ return tint::ast::transform::VertexFormat::kVec4U8;
+ case wgpu::VertexFormat::Char2:
+ return tint::ast::transform::VertexFormat::kVec2I8;
+ case wgpu::VertexFormat::Char4:
+ return tint::ast::transform::VertexFormat::kVec4I8;
+ case wgpu::VertexFormat::UChar2Norm:
+ return tint::ast::transform::VertexFormat::kVec2U8Norm;
+ case wgpu::VertexFormat::UChar4Norm:
+ return tint::ast::transform::VertexFormat::kVec4U8Norm;
+ case wgpu::VertexFormat::Char2Norm:
+ return tint::ast::transform::VertexFormat::kVec2I8Norm;
+ case wgpu::VertexFormat::Char4Norm:
+ return tint::ast::transform::VertexFormat::kVec4I8Norm;
+ case wgpu::VertexFormat::UShort2:
+ return tint::ast::transform::VertexFormat::kVec2U16;
+ case wgpu::VertexFormat::UShort4:
+ return tint::ast::transform::VertexFormat::kVec4U16;
+ case wgpu::VertexFormat::Short2:
+ return tint::ast::transform::VertexFormat::kVec2I16;
+ case wgpu::VertexFormat::Short4:
+ return tint::ast::transform::VertexFormat::kVec4I16;
+ case wgpu::VertexFormat::UShort2Norm:
+ return tint::ast::transform::VertexFormat::kVec2U16Norm;
+ case wgpu::VertexFormat::UShort4Norm:
+ return tint::ast::transform::VertexFormat::kVec4U16Norm;
+ case wgpu::VertexFormat::Short2Norm:
+ return tint::ast::transform::VertexFormat::kVec2I16Norm;
+ case wgpu::VertexFormat::Short4Norm:
+ return tint::ast::transform::VertexFormat::kVec4I16Norm;
+ case wgpu::VertexFormat::Half2:
+ return tint::ast::transform::VertexFormat::kVec2F16;
+ case wgpu::VertexFormat::Half4:
+ return tint::ast::transform::VertexFormat::kVec4F16;
+ case wgpu::VertexFormat::Float:
+ return tint::ast::transform::VertexFormat::kF32;
+ case wgpu::VertexFormat::Float2:
+ return tint::ast::transform::VertexFormat::kVec2F32;
+ case wgpu::VertexFormat::Float3:
+ return tint::ast::transform::VertexFormat::kVec3F32;
+ case wgpu::VertexFormat::Float4:
+ return tint::ast::transform::VertexFormat::kVec4F32;
+ case wgpu::VertexFormat::UInt:
+ return tint::ast::transform::VertexFormat::kU32;
+ case wgpu::VertexFormat::UInt2:
+ return tint::ast::transform::VertexFormat::kVec2U32;
+ case wgpu::VertexFormat::UInt3:
+ return tint::ast::transform::VertexFormat::kVec3U32;
+ case wgpu::VertexFormat::UInt4:
+ return tint::ast::transform::VertexFormat::kVec4U32;
+ case wgpu::VertexFormat::Int:
+ return tint::ast::transform::VertexFormat::kI32;
+ case wgpu::VertexFormat::Int2:
+ return tint::ast::transform::VertexFormat::kVec2I32;
+ case wgpu::VertexFormat::Int3:
+ return tint::ast::transform::VertexFormat::kVec3I32;
+ case wgpu::VertexFormat::Int4:
+ return tint::ast::transform::VertexFormat::kVec4I32;
+ }
+ }
+
+ tint::ast::transform::InputStepMode ToTintInputStepMode(wgpu::InputStepMode mode) {
+ switch (mode) {
+ case wgpu::InputStepMode::Vertex:
+ return tint::ast::transform::InputStepMode::kVertex;
+ case wgpu::InputStepMode::Instance:
+ return tint::ast::transform::InputStepMode::kInstance;
+ }
+ }
+#endif
} // anonymous namespace
MaybeError ValidateSpirv(DeviceBase*, const uint32_t* code, uint32_t codeSize) {
@@ -357,7 +433,7 @@ namespace dawn_native {
}
tint::Validator validator;
- if (!validator.Validate(module)) {
+ if (!validator.Validate(&module)) {
errorStream << "Validation: " << validator.error() << std::endl;
return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
}
@@ -400,6 +476,75 @@ namespace dawn_native {
std::vector<uint32_t> spirv = generator.result();
return std::move(spirv);
}
+
+ ResultOrError<std::vector<uint32_t>> ConvertWGSLToSPIRVWithPulling(
+ const char* source,
+ const VertexStateDescriptor& vertexState,
+ const std::string& entryPoint,
+ uint32_t pullingBufferBindingSet) {
+ std::ostringstream errorStream;
+ errorStream << "Tint WGSL->SPIR-V failure:" << std::endl;
+
+ tint::Context context;
+ tint::reader::wgsl::Parser parser(&context, source);
+
+        // TODO: This duplicates the parse done in ValidateWGSL; we need to store
+        // state between calls to avoid it.
+ if (!parser.Parse()) {
+ errorStream << "Parser: " << parser.error() << std::endl;
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ tint::ast::Module module = parser.module();
+ if (!module.IsValid()) {
+ errorStream << "Invalid module generated..." << std::endl;
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ tint::ast::transform::VertexPullingTransform transform(&context, &module);
+ auto state = std::make_unique<tint::ast::transform::VertexStateDescriptor>();
+ for (uint32_t i = 0; i < vertexState.vertexBufferCount; ++i) {
+ auto& vertexBuffer = vertexState.vertexBuffers[i];
+ tint::ast::transform::VertexBufferLayoutDescriptor layout;
+ layout.array_stride = vertexBuffer.arrayStride;
+ layout.step_mode = ToTintInputStepMode(vertexBuffer.stepMode);
+
+ for (uint32_t j = 0; j < vertexBuffer.attributeCount; ++j) {
+ auto& attribute = vertexBuffer.attributes[j];
+ tint::ast::transform::VertexAttributeDescriptor attr;
+ attr.format = ToTintVertexFormat(attribute.format);
+ attr.offset = attribute.offset;
+ attr.shader_location = attribute.shaderLocation;
+
+ layout.attributes.push_back(std::move(attr));
+ }
+
+ state->vertex_buffers.push_back(std::move(layout));
+ }
+ transform.SetVertexState(std::move(state));
+ transform.SetEntryPoint(entryPoint);
+ transform.SetPullingBufferBindingSet(pullingBufferBindingSet);
+
+ if (!transform.Run()) {
+ errorStream << "Vertex pulling transform: " << transform.GetError();
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ tint::TypeDeterminer type_determiner(&context, &module);
+ if (!type_determiner.Determine()) {
+ errorStream << "Type Determination: " << type_determiner.error();
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ tint::writer::spirv::Generator generator(std::move(module));
+ if (!generator.Generate()) {
+ errorStream << "Generator: " << generator.error() << std::endl;
+ return DAWN_VALIDATION_ERROR(errorStream.str().c_str());
+ }
+
+ std::vector<uint32_t> spirv = generator.result();
+ return std::move(spirv);
+ }
#endif // DAWN_ENABLE_WGSL
MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
@@ -463,7 +608,7 @@ namespace dawn_native {
UNREACHABLE();
}
- mFragmentOutputFormatBaseTypes.fill(Format::Other);
+ mFragmentOutputFormatBaseTypes.fill(Format::Type::Other);
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvcParser)) {
mSpvcContext.SetUseSpvcParser(true);
}
@@ -1094,9 +1239,22 @@ namespace dawn_native {
return mSpirv;
}
+#ifdef DAWN_ENABLE_WGSL
+ ResultOrError<std::vector<uint32_t>> ShaderModuleBase::GeneratePullingSpirv(
+ const VertexStateDescriptor& vertexState,
+ const std::string& entryPoint,
+ uint32_t pullingBufferBindingSet) const {
+ return ConvertWGSLToSPIRVWithPulling(mWgsl.c_str(), vertexState, entryPoint,
+ pullingBufferBindingSet);
+ }
+#endif
+
shaderc_spvc::CompileOptions ShaderModuleBase::GetCompileOptions() const {
shaderc_spvc::CompileOptions options;
options.SetValidate(GetDevice()->IsValidationEnabled());
+ options.SetRobustBufferAccessPass(GetDevice()->IsRobustnessEnabled());
+ options.SetSourceEnvironment(shaderc_target_env_vulkan, shaderc_env_version_vulkan_1_1);
+ options.SetTargetEnvironment(shaderc_target_env_vulkan, shaderc_env_version_vulkan_1_1);
return options;
}
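
Stripped of Dawn's error plumbing, both WGSL conversion paths in this file run the same tint stages in order, with the vertex-pulling transform optionally spliced in between parsing and type determination. A condensed sketch using the same tint calls as the snapshot above (tint headers omitted; tint's API has changed substantially since this revision):

    #ifdef DAWN_ENABLE_WGSL
    std::vector<uint32_t> CompileWgslSketch(const char* source) {
        tint::Context context;
        tint::reader::wgsl::Parser parser(&context, source);
        if (!parser.Parse()) {
            return {};  // parser.error() carries the diagnostic
        }

        tint::ast::Module module = parser.module();
        if (!module.IsValid()) {
            return {};
        }

        // AST transforms, e.g. VertexPullingTransform, would run here.

        tint::TypeDeterminer typeDeterminer(&context, &module);
        if (!typeDeterminer.Determine()) {
            return {};
        }

        tint::writer::spirv::Generator generator(std::move(module));
        if (!generator.Generate()) {
            return {};
        }
        return generator.result();
    }
    #endif  // DAWN_ENABLE_WGSL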
diff --git a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
index 4e771aa8335..336551ee85e 100644
--- a/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
+++ b/chromium/third_party/dawn/src/dawn_native/ShaderModule.h
@@ -91,6 +91,13 @@ namespace dawn_native {
shaderc_spvc::Context* GetContext();
const std::vector<uint32_t>& GetSpirv() const;
+#ifdef DAWN_ENABLE_WGSL
+ ResultOrError<std::vector<uint32_t>> GeneratePullingSpirv(
+ const VertexStateDescriptor& vertexState,
+ const std::string& entryPoint,
+ uint32_t pullingBufferBindingSet) const;
+#endif
+
protected:
static MaybeError CheckSpvcSuccess(shaderc_spvc_status status, const char* error_msg);
shaderc_spvc::CompileOptions GetCompileOptions() const;
diff --git a/chromium/third_party/dawn/src/dawn_native/StagingBuffer.cpp b/chromium/third_party/dawn/src/dawn_native/StagingBuffer.cpp
index 51f5fa8c5e5..63dd65e9bf7 100644
--- a/chromium/third_party/dawn/src/dawn_native/StagingBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/StagingBuffer.cpp
@@ -26,4 +26,4 @@ namespace dawn_native {
void* StagingBufferBase::GetMappedPointer() const {
return mMappedPointer;
}
-} // namespace dawn_native \ No newline at end of file
+} // namespace dawn_native
diff --git a/chromium/third_party/dawn/src/dawn_native/StagingBuffer.h b/chromium/third_party/dawn/src/dawn_native/StagingBuffer.h
index 1da8900a128..4d195488bc9 100644
--- a/chromium/third_party/dawn/src/dawn_native/StagingBuffer.h
+++ b/chromium/third_party/dawn/src/dawn_native/StagingBuffer.h
@@ -38,4 +38,4 @@ namespace dawn_native {
} // namespace dawn_native
-#endif // DAWNNATIVE_STAGINGBUFFER_H_ \ No newline at end of file
+#endif // DAWNNATIVE_STAGINGBUFFER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/Surface.cpp b/chromium/third_party/dawn/src/dawn_native/Surface.cpp
index 382bb71c6a5..ccd240c9dfc 100644
--- a/chromium/third_party/dawn/src/dawn_native/Surface.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Surface.cpp
@@ -99,7 +99,7 @@ namespace dawn_native {
}
#endif // defined(DAWN_USE_X11)
- case wgpu::SType::SurfaceDescriptorFromHTMLCanvasId:
+ case wgpu::SType::SurfaceDescriptorFromCanvasHTMLSelector:
default:
return DAWN_VALIDATION_ERROR("Unsupported sType");
}
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.cpp b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
index f029981fb04..8f93b46ddf9 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.cpp
@@ -20,6 +20,7 @@
#include "common/Constants.h"
#include "common/Math.h"
#include "dawn_native/Device.h"
+#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/PassResourceUsage.h"
#include "dawn_native/ValidationUtils_autogen.h"
@@ -200,6 +201,11 @@ namespace dawn_native {
return {};
}
+ uint8_t GetPlaneIndex(Aspect aspect) {
+ ASSERT(HasOneBit(aspect));
+ return static_cast<uint8_t>(Log2(static_cast<uint32_t>(aspect)));
+ }
+
} // anonymous namespace
MaybeError ValidateTextureDescriptor(const DeviceBase* device,
@@ -327,25 +333,6 @@ namespace dawn_native {
return desc;
}
- ResultOrError<TextureDescriptor> FixTextureDescriptor(DeviceBase* device,
- const TextureDescriptor* desc) {
- TextureDescriptor fixedDesc = *desc;
-
- if (desc->arrayLayerCount != 1) {
- if (desc->size.depth != 1) {
- return DAWN_VALIDATION_ERROR("arrayLayerCount and size.depth cannot both be != 1");
- } else {
- fixedDesc.size.depth = fixedDesc.arrayLayerCount;
- fixedDesc.arrayLayerCount = 1;
- device->EmitDeprecationWarning(
- "wgpu::TextureDescriptor::arrayLayerCount is deprecated in favor of "
- "::size::depth");
- }
- }
-
- return {std::move(fixedDesc)};
- }
-
bool IsValidSampleCount(uint32_t sampleCount) {
switch (sampleCount) {
case 1:
@@ -357,10 +344,33 @@ namespace dawn_native {
}
}
+ Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect) {
+ Aspect aspectMask = ConvertAspect(format, aspect);
+ ASSERT(HasOneBit(aspectMask));
+ return aspectMask;
+ }
+
+ Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect) {
+ switch (aspect) {
+ case wgpu::TextureAspect::All:
+ return format.aspects;
+ case wgpu::TextureAspect::DepthOnly:
+ ASSERT(format.aspects & Aspect::Depth);
+ return Aspect::Depth;
+ case wgpu::TextureAspect::StencilOnly:
+ ASSERT(format.aspects & Aspect::Stencil);
+ return Aspect::Stencil;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
// static
- SubresourceRange SubresourceRange::SingleSubresource(uint32_t baseMipLevel,
- uint32_t baseArrayLayer) {
- return {baseMipLevel, 1, baseArrayLayer, 1};
+ SubresourceRange SubresourceRange::SingleMipAndLayer(uint32_t baseMipLevel,
+ uint32_t baseArrayLayer,
+ Aspect aspects) {
+ return {baseMipLevel, 1, baseArrayLayer, 1, aspects};
}
// TextureBase
@@ -376,7 +386,13 @@ namespace dawn_native {
mSampleCount(descriptor->sampleCount),
mUsage(descriptor->usage),
mState(state) {
- uint32_t subresourceCount = GetSubresourceCount();
+ uint8_t planeIndex = 0;
+ for (Aspect aspect : IterateEnumMask(mFormat.aspects)) {
+ mPlaneIndices[GetPlaneIndex(aspect)] = planeIndex++;
+ }
+
+ uint32_t subresourceCount =
+ mMipLevelCount * mSize.depth * static_cast<uint32_t>(planeIndex);
mIsSubresourceContentInitializedAtIndex = std::vector<bool>(subresourceCount, false);
// Add readonly storage usage if the texture has a storage usage. The validation rules in
@@ -438,7 +454,7 @@ namespace dawn_native {
}
SubresourceRange TextureBase::GetAllSubresources() const {
ASSERT(!IsError());
- return {0, mMipLevelCount, 0, GetArrayLayers()};
+ return {0, mMipLevelCount, 0, GetArrayLayers(), mFormat.aspects};
}
uint32_t TextureBase::GetSampleCount() const {
ASSERT(!IsError());
@@ -446,7 +462,7 @@ namespace dawn_native {
}
uint32_t TextureBase::GetSubresourceCount() const {
ASSERT(!IsError());
- return mMipLevelCount * mSize.depth;
+ return static_cast<uint32_t>(mIsSubresourceContentInitializedAtIndex.size());
}
wgpu::TextureUsage TextureBase::GetUsage() const {
ASSERT(!IsError());
@@ -458,25 +474,32 @@ namespace dawn_native {
return mState;
}
- uint32_t TextureBase::GetSubresourceIndex(uint32_t mipLevel, uint32_t arraySlice) const {
+ uint32_t TextureBase::GetSubresourceIndex(uint32_t mipLevel,
+ uint32_t arraySlice,
+ Aspect aspect) const {
ASSERT(arraySlice <= kMaxTexture2DArrayLayers);
ASSERT(mipLevel <= kMaxTexture2DMipLevels);
+ ASSERT(HasOneBit(aspect));
static_assert(kMaxTexture2DMipLevels <=
std::numeric_limits<uint32_t>::max() / kMaxTexture2DArrayLayers,
"texture size overflows uint32_t");
- return GetNumMipLevels() * arraySlice + mipLevel;
+ return mipLevel +
+ GetNumMipLevels() *
+ (arraySlice + GetArrayLayers() * mPlaneIndices[GetPlaneIndex(aspect)]);
}
bool TextureBase::IsSubresourceContentInitialized(const SubresourceRange& range) const {
ASSERT(!IsError());
- for (uint32_t arrayLayer = range.baseArrayLayer;
- arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
- for (uint32_t mipLevel = range.baseMipLevel;
- mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
- uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer);
- ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
- if (!mIsSubresourceContentInitializedAtIndex[subresourceIndex]) {
- return false;
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+ for (uint32_t mipLevel = range.baseMipLevel;
+ mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
+ uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
+ ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
+ if (!mIsSubresourceContentInitializedAtIndex[subresourceIndex]) {
+ return false;
+ }
}
}
}
@@ -486,13 +509,15 @@ namespace dawn_native {
void TextureBase::SetIsSubresourceContentInitialized(bool isInitialized,
const SubresourceRange& range) {
ASSERT(!IsError());
- for (uint32_t arrayLayer = range.baseArrayLayer;
- arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
- for (uint32_t mipLevel = range.baseMipLevel;
- mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
- uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer);
- ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
- mIsSubresourceContentInitializedAtIndex[subresourceIndex] = isInitialized;
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+ for (uint32_t mipLevel = range.baseMipLevel;
+ mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
+ uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
+ ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
+ mIsSubresourceContentInitializedAtIndex[subresourceIndex] = isInitialized;
+ }
}
}
}
@@ -541,6 +566,19 @@ namespace dawn_native {
return extent;
}
+ Extent3D TextureBase::ClampToMipLevelVirtualSize(uint32_t level,
+ const Origin3D& origin,
+ const Extent3D& extent) const {
+ const Extent3D virtualSizeAtLevel = GetMipLevelVirtualSize(level);
+ uint32_t clampedCopyExtentWidth = (origin.x + extent.width > virtualSizeAtLevel.width)
+ ? (virtualSizeAtLevel.width - origin.x)
+ : extent.width;
+ uint32_t clampedCopyExtentHeight = (origin.y + extent.height > virtualSizeAtLevel.height)
+ ? (virtualSizeAtLevel.height - origin.y)
+ : extent.height;
+ return {clampedCopyExtentWidth, clampedCopyExtentHeight, extent.depth};
+ }
+
TextureViewBase* TextureBase::CreateView(const TextureViewDescriptor* descriptor) {
return GetDevice()->CreateTextureView(this, descriptor);
}
@@ -571,10 +609,13 @@ namespace dawn_native {
TextureViewBase::TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor)
: ObjectBase(texture->GetDevice()),
mTexture(texture),
+ mAspect(descriptor->aspect),
mFormat(GetDevice()->GetValidInternalFormat(descriptor->format)),
mDimension(descriptor->dimension),
mRange({descriptor->baseMipLevel, descriptor->mipLevelCount, descriptor->baseArrayLayer,
- descriptor->arrayLayerCount}) {
+ descriptor->arrayLayerCount, ConvertAspect(mFormat, mAspect)}) {
+ // TODO(crbug.com/dawn/439): Current validation only allows texture views with aspect "all".
+ ASSERT(mAspect == wgpu::TextureAspect::All);
}
TextureViewBase::TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag)
@@ -596,6 +637,11 @@ namespace dawn_native {
return mTexture.Get();
}
+ wgpu::TextureAspect TextureViewBase::GetAspect() const {
+ ASSERT(!IsError());
+ return mAspect;
+ }
+
const Format& TextureViewBase::GetFormat() const {
ASSERT(!IsError());
return mFormat;
@@ -630,4 +676,5 @@ namespace dawn_native {
ASSERT(!IsError());
return mRange;
}
+
} // namespace dawn_native
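
Subresource bookkeeping now spans three dimensions: a (mip, layer, aspect) triple maps to a flat index with mip levels varying fastest, then array layers, then aspect planes, and plane numbers are compacted per format so a depth/stencil texture gets Depth as plane 0. A standalone restatement of the arithmetic (simplified; Dawn caches the compacted plane indices in mPlaneIndices):

    #include <cassert>
    #include <cstdint>

    // Aspect bits as declared in Texture.h.
    enum AspectBits : uint8_t { kColor = 0x1, kDepth = 0x2, kStencil = 0x4 };

    // Compact plane numbering: iterate a format's aspect mask in increasing
    // bit order, so a depth/stencil format gets Depth as plane 0 and Stencil
    // as plane 1, matching the note in Texture.h.
    void ComputePlaneIndices(uint8_t formatAspects, uint8_t planeIndices[3]) {
        uint8_t nextPlane = 0;
        for (uint8_t bit = 0; bit < 3; ++bit) {
            if (formatAspects & (1u << bit)) {
                planeIndices[bit] = nextPlane++;
            }
        }
    }

    // Flat subresource index: mips vary fastest, then layers, then planes.
    uint32_t SubresourceIndex(uint32_t mipLevel, uint32_t arraySlice, uint8_t plane,
                              uint32_t mipLevelCount, uint32_t arrayLayerCount) {
        return mipLevel + mipLevelCount * (arraySlice + arrayLayerCount * plane);
    }

    int main() {
        uint8_t planes[3] = {};
        ComputePlaneIndices(kDepth | kStencil, planes);
        // Stencil (bit 2) of mip 1, layer 0 in a 3-mip, 2-layer texture:
        uint32_t index = SubresourceIndex(1, 0, planes[2], 3, 2);
        assert(index == 1 + 3 * (0 + 2 * 1));  // == 7
        return 0;
    }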
diff --git a/chromium/third_party/dawn/src/dawn_native/Texture.h b/chromium/third_party/dawn/src/dawn_native/Texture.h
index 29007941344..252f11f64a2 100644
--- a/chromium/third_party/dawn/src/dawn_native/Texture.h
+++ b/chromium/third_party/dawn/src/dawn_native/Texture.h
@@ -15,6 +15,9 @@
#ifndef DAWNNATIVE_TEXTURE_H_
#define DAWNNATIVE_TEXTURE_H_
+#include "common/ityp_array.h"
+#include "common/ityp_bitset.h"
+#include "dawn_native/EnumClassBitmasks.h"
#include "dawn_native/Error.h"
#include "dawn_native/Forward.h"
#include "dawn_native/ObjectBase.h"
@@ -24,6 +27,35 @@
#include <vector>
namespace dawn_native {
+
+ // Note: Subresource indices are computed by iterating the aspects in increasing order.
+    // D3D12 uses these directly, so the order must match D3D12's indices.
+ // - Depth/Stencil textures have Depth as Plane 0, and Stencil as Plane 1.
+ enum class Aspect : uint8_t {
+ None = 0x0,
+ Color = 0x1,
+ Depth = 0x2,
+ Stencil = 0x4,
+ };
+
+ template <>
+ struct EnumBitmaskSize<Aspect> {
+ static constexpr unsigned value = 3;
+ };
+
+} // namespace dawn_native
+
+namespace wgpu {
+
+ template <>
+ struct IsDawnBitmask<dawn_native::Aspect> {
+ static constexpr bool enable = true;
+ };
+
+} // namespace wgpu
+
+namespace dawn_native {
+
MaybeError ValidateTextureDescriptor(const DeviceBase* device,
const TextureDescriptor* descriptor);
MaybeError ValidateTextureViewDescriptor(const TextureBase* texture,
@@ -32,11 +64,6 @@ namespace dawn_native {
const TextureBase* texture,
const TextureViewDescriptor* descriptor);
- // TODO(dawn:22): Remove once migration from GPUTextureDescriptor.arrayLayerCount to
- // GPUTextureDescriptor.size.depth is done.
- ResultOrError<TextureDescriptor> FixTextureDescriptor(DeviceBase* device,
- const TextureDescriptor* desc);
-
bool IsValidSampleCount(uint32_t sampleCount);
static constexpr wgpu::TextureUsage kReadOnlyTextureUsages =
@@ -46,13 +73,19 @@ namespace dawn_native {
wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::Storage |
wgpu::TextureUsage::OutputAttachment;
+ Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect);
+ Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect);
+
struct SubresourceRange {
uint32_t baseMipLevel;
uint32_t levelCount;
uint32_t baseArrayLayer;
uint32_t layerCount;
+ Aspect aspects;
- static SubresourceRange SingleSubresource(uint32_t baseMipLevel, uint32_t baseArrayLayer);
+ static SubresourceRange SingleMipAndLayer(uint32_t baseMipLevel,
+ uint32_t baseArrayLayer,
+ Aspect aspects);
};
class TextureBase : public ObjectBase {
@@ -76,7 +109,7 @@ namespace dawn_native {
uint32_t GetSubresourceCount() const;
wgpu::TextureUsage GetUsage() const;
TextureState GetTextureState() const;
- uint32_t GetSubresourceIndex(uint32_t mipLevel, uint32_t arraySlice) const;
+ uint32_t GetSubresourceIndex(uint32_t mipLevel, uint32_t arraySlice, Aspect aspect) const;
bool IsSubresourceContentInitialized(const SubresourceRange& range) const;
void SetIsSubresourceContentInitialized(bool isInitialized, const SubresourceRange& range);
@@ -91,6 +124,9 @@ namespace dawn_native {
// required to be a multiple of the block size and used in texture sampling.
Extent3D GetMipLevelPhysicalSize(uint32_t level) const;
Extent3D GetMipLevelVirtualSize(uint32_t level) const;
+ Extent3D ClampToMipLevelVirtualSize(uint32_t level,
+ const Origin3D& origin,
+ const Extent3D& extent) const;
// Dawn API
TextureViewBase* CreateView(const TextureViewDescriptor* descriptor);
@@ -115,6 +151,7 @@ namespace dawn_native {
// TODO(natlee@microsoft.com): Use a more optimized data structure to save space
std::vector<bool> mIsSubresourceContentInitializedAtIndex;
+ std::array<uint8_t, EnumBitmaskSize<Aspect>::value> mPlaneIndices;
};
class TextureViewBase : public ObjectBase {
@@ -126,6 +163,7 @@ namespace dawn_native {
const TextureBase* GetTexture() const;
TextureBase* GetTexture();
+ wgpu::TextureAspect GetAspect() const;
const Format& GetFormat() const;
wgpu::TextureViewDimension GetDimension() const;
uint32_t GetBaseMipLevel() const;
@@ -139,6 +177,7 @@ namespace dawn_native {
Ref<TextureBase> mTexture;
+ wgpu::TextureAspect mAspect;
// TODO(cwallez@chromium.org): This should be deduplicated in the Device
const Format& mFormat;
wgpu::TextureViewDimension mDimension;
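
IterateEnumMask, included from EnumMaskIterator.h, visits each set bit of an Aspect mask as a single-bit value in increasing bit order, which is exactly the property the plane numbering above relies on. A minimal free-function equivalent (a sketch, not Dawn's actual iterator):

    #include <cstdint>
    #include <cstdio>

    enum class Aspect : uint8_t { None = 0x0, Color = 0x1, Depth = 0x2, Stencil = 0x4 };

    // Call `visit` once per set bit, lowest bit first.
    template <typename F>
    void ForEachAspect(Aspect mask, F&& visit) {
        uint8_t bits = static_cast<uint8_t>(mask);
        while (bits != 0) {
            uint8_t lowest = static_cast<uint8_t>(bits & (~bits + 1u));  // lowest set bit
            visit(static_cast<Aspect>(lowest));
            bits = static_cast<uint8_t>(bits & (bits - 1u));  // clear it
        }
    }

    int main() {
        ForEachAspect(static_cast<Aspect>(0x2 | 0x4), [](Aspect a) {
            std::printf("aspect bit: %u\n", static_cast<unsigned>(a));
        });
        return 0;
    }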
diff --git a/chromium/third_party/dawn/src/dawn_native/Toggles.cpp b/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
index b4ec0db0e31..d54dc094bac 100644
--- a/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/Toggles.cpp
@@ -29,109 +29,120 @@ namespace dawn_native {
using ToggleEnumAndInfoList =
std::array<ToggleEnumAndInfo, static_cast<size_t>(Toggle::EnumCount)>;
- static constexpr ToggleEnumAndInfoList kToggleNameAndInfoList = {{
- {Toggle::EmulateStoreAndMSAAResolve,
- {"emulate_store_and_msaa_resolve",
- "Emulate storing into multisampled color attachments and doing MSAA resolve "
- "simultaneously. This workaround is enabled by default on the Metal drivers that do "
- "not support MTLStoreActionStoreAndMultisampleResolve. To support StoreOp::Store on "
- "those platforms, we should do MSAA resolve in another render pass after ending the "
- "previous one.",
- "https://crbug.com/dawn/56"}},
- {Toggle::NonzeroClearResourcesOnCreationForTesting,
- {"nonzero_clear_resources_on_creation_for_testing",
-          "Clears textures to full 1 bits as soon as they are created, but doesn't update "
- "the tracking state of the texture. This way we can test the logic of clearing "
- "textures that use recycled memory.",
- "https://crbug.com/dawn/145"}},
- {Toggle::AlwaysResolveIntoZeroLevelAndLayer,
- {"always_resolve_into_zero_level_and_layer",
- "When the resolve target is a texture view that is created on the non-zero level or "
-          "layer of a texture, we first resolve into a temporary 2D texture with only one "
- "mipmap level and one array layer, and copy the result of MSAA resolve into the "
- "true resolve target. This workaround is enabled by default on the Metal drivers "
- "that have bugs when setting non-zero resolveLevel or resolveSlice.",
- "https://crbug.com/dawn/56"}},
- {Toggle::LazyClearResourceOnFirstUse,
- {"lazy_clear_resource_on_first_use",
- "Clears resource to zero on first usage. This initializes the resource "
-          "so that no dirty bits from recycled memory are present in the new resource.",
- "https://crbug.com/dawn/145"}},
- {Toggle::TurnOffVsync,
- {"turn_off_vsync",
-          "Turn off vsync when rendering. In order to do performance tests or run perf tests, "
-          "turn off vsync so that the fps can exceed 60.",
- "https://crbug.com/dawn/237"}},
- {Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy,
- {"use_temporary_buffer_in_texture_to_texture_copy",
- "Split texture-to-texture copy into two copies: copy from source texture into a "
- "temporary buffer, and copy from the temporary buffer into the destination texture "
- "when copying between compressed textures that don't have block-aligned sizes. This "
- "workaround is enabled by default on all Vulkan drivers to solve an issue in the "
- "Vulkan SPEC about the texture-to-texture copies with compressed formats. See #1005 "
- "(https://github.com/KhronosGroup/Vulkan-Docs/issues/1005) for more details.",
- "https://crbug.com/dawn/42"}},
- {Toggle::UseD3D12ResourceHeapTier2,
- {"use_d3d12_resource_heap_tier2",
- "Enable support for resource heap tier 2. Resource heap tier 2 allows mixing of "
- "texture and buffers in the same heap. This allows better heap re-use and reduces "
-          "textures and buffers in the same heap. This allows better heap re-use and reduces "
- "https://crbug.com/dawn/27"}},
- {Toggle::UseD3D12RenderPass,
- {"use_d3d12_render_pass",
- "Use the D3D12 render pass API introduced in Windows build 1809 by default. On "
- "versions of Windows prior to build 1809, or when this toggle is turned off, Dawn "
- "will emulate a render pass.",
- "https://crbug.com/dawn/36"}},
- {Toggle::UseD3D12ResidencyManagement,
- {"use_d3d12_residency_management",
- "Enable residency management. This allows page-in and page-out of resource heaps in "
- "GPU memory. This component improves overcommitted performance by keeping the most "
- "recently used resources local to the GPU. Turning this component off can cause "
- "allocation failures when application memory exceeds physical device memory.",
- "https://crbug.com/dawn/193"}},
- {Toggle::SkipValidation,
- {"skip_validation", "Skip expensive validation of Dawn commands.",
- "https://crbug.com/dawn/271"}},
- {Toggle::UseSpvc,
- {"use_spvc",
- "Enable use of spvc for shader compilation, instead of accessing spirv_cross "
- "directly.",
- "https://crbug.com/dawn/288"}},
- {Toggle::UseSpvcParser,
- {"use_spvc_parser",
- "Enable usage of spvc's internal parsing and IR generation code, instead of "
- "spirv_cross's.",
- "https://crbug.com/dawn/288"}},
- {Toggle::VulkanUseD32S8,
- {"vulkan_use_d32s8",
- "Vulkan mandates support of either D32_FLOAT_S8 or D24_UNORM_S8. When available the "
-          "backend will use D32S8 (toggle to on) but setting the toggle to off will make it "
-          "use the D24S8 format when possible.",
- "https://crbug.com/dawn/286"}},
- {Toggle::MetalDisableSamplerCompare,
- {"metal_disable_sampler_compare",
- "Disables the use of sampler compare on Metal. This is unsupported before A9 "
- "processors.",
- "https://crbug.com/dawn/342"}},
- {Toggle::DisableBaseVertex,
- {"disable_base_vertex",
- "Disables the use of non-zero base vertex which is unsupported on some platforms.",
- "https://crbug.com/dawn/343"}},
- {Toggle::DisableBaseInstance,
- {"disable_base_instance",
- "Disables the use of non-zero base instance which is unsupported on some "
- "platforms.",
- "https://crbug.com/dawn/343"}},
- {Toggle::UseD3D12SmallShaderVisibleHeapForTesting,
- {"use_d3d12_small_shader_visible_heap",
- "Enable use of a small D3D12 shader visible heap, instead of using a large one by "
- "default. This setting is used to test bindgroup encoding.",
- "https://crbug.com/dawn/155"}},
- {Toggle::UseDXC,
- {"use_dxc", "Use DXC instead of FXC for compiling HLSL",
- "https://crbug.com/dawn/402"}},
- }};
+ static constexpr ToggleEnumAndInfoList kToggleNameAndInfoList = {
+ {{Toggle::EmulateStoreAndMSAAResolve,
+ {"emulate_store_and_msaa_resolve",
+ "Emulate storing into multisampled color attachments and doing MSAA resolve "
+ "simultaneously. This workaround is enabled by default on the Metal drivers that do "
+ "not support MTLStoreActionStoreAndMultisampleResolve. To support StoreOp::Store on "
+ "those platforms, we should do MSAA resolve in another render pass after ending the "
+ "previous one.",
+ "https://crbug.com/dawn/56"}},
+ {Toggle::NonzeroClearResourcesOnCreationForTesting,
+ {"nonzero_clear_resources_on_creation_for_testing",
+           "Clears textures to full 1 bits as soon as they are created, but doesn't update "
+ "the tracking state of the texture. This way we can test the logic of clearing "
+ "textures that use recycled memory.",
+ "https://crbug.com/dawn/145"}},
+ {Toggle::AlwaysResolveIntoZeroLevelAndLayer,
+ {"always_resolve_into_zero_level_and_layer",
+ "When the resolve target is a texture view that is created on the non-zero level or "
+           "layer of a texture, we first resolve into a temporary 2D texture with only one "
+ "mipmap level and one array layer, and copy the result of MSAA resolve into the "
+ "true resolve target. This workaround is enabled by default on the Metal drivers "
+ "that have bugs when setting non-zero resolveLevel or resolveSlice.",
+ "https://crbug.com/dawn/56"}},
+ {Toggle::LazyClearResourceOnFirstUse,
+ {"lazy_clear_resource_on_first_use",
+ "Clears resource to zero on first usage. This initializes the resource "
+           "so that no dirty bits from recycled memory are present in the new resource.",
+ "https://crbug.com/dawn/145"}},
+ {Toggle::TurnOffVsync,
+ {"turn_off_vsync",
+           "Turn off vsync when rendering. In order to do performance tests or run perf tests, "
+           "turn off vsync so that the fps can exceed 60.",
+ "https://crbug.com/dawn/237"}},
+ {Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy,
+ {"use_temporary_buffer_in_texture_to_texture_copy",
+ "Split texture-to-texture copy into two copies: copy from source texture into a "
+ "temporary buffer, and copy from the temporary buffer into the destination texture "
+ "when copying between compressed textures that don't have block-aligned sizes. This "
+ "workaround is enabled by default on all Vulkan drivers to solve an issue in the "
+ "Vulkan SPEC about the texture-to-texture copies with compressed formats. See #1005 "
+ "(https://github.com/KhronosGroup/Vulkan-Docs/issues/1005) for more details.",
+ "https://crbug.com/dawn/42"}},
+ {Toggle::UseD3D12ResourceHeapTier2,
+ {"use_d3d12_resource_heap_tier2",
+ "Enable support for resource heap tier 2. Resource heap tier 2 allows mixing of "
+ "texture and buffers in the same heap. This allows better heap re-use and reduces "
+           "textures and buffers in the same heap. This allows better heap re-use and reduces "
+ "https://crbug.com/dawn/27"}},
+ {Toggle::UseD3D12RenderPass,
+ {"use_d3d12_render_pass",
+ "Use the D3D12 render pass API introduced in Windows build 1809 by default. On "
+ "versions of Windows prior to build 1809, or when this toggle is turned off, Dawn "
+ "will emulate a render pass.",
+ "https://crbug.com/dawn/36"}},
+ {Toggle::UseD3D12ResidencyManagement,
+ {"use_d3d12_residency_management",
+ "Enable residency management. This allows page-in and page-out of resource heaps in "
+ "GPU memory. This component improves overcommitted performance by keeping the most "
+ "recently used resources local to the GPU. Turning this component off can cause "
+ "allocation failures when application memory exceeds physical device memory.",
+ "https://crbug.com/dawn/193"}},
+ {Toggle::SkipValidation,
+ {"skip_validation", "Skip expensive validation of Dawn commands.",
+ "https://crbug.com/dawn/271"}},
+ {Toggle::UseSpvc,
+ {"use_spvc",
+ "Enable use of spvc for shader compilation, instead of accessing spirv_cross "
+ "directly.",
+ "https://crbug.com/dawn/288"}},
+ {Toggle::UseSpvcParser,
+ {"use_spvc_parser",
+ "Enable usage of spvc's internal parsing and IR generation code, instead of "
+ "spirv_cross's.",
+ "https://crbug.com/dawn/288"}},
+ {Toggle::VulkanUseD32S8,
+ {"vulkan_use_d32s8",
+ "Vulkan mandates support of either D32_FLOAT_S8 or D24_UNORM_S8. When available the "
+           "backend will use D32S8 (toggle to on) but setting the toggle to off will make it "
+           "use the D24S8 format when possible.",
+ "https://crbug.com/dawn/286"}},
+ {Toggle::MetalDisableSamplerCompare,
+ {"metal_disable_sampler_compare",
+ "Disables the use of sampler compare on Metal. This is unsupported before A9 "
+ "processors.",
+ "https://crbug.com/dawn/342"}},
+ {Toggle::DisableBaseVertex,
+ {"disable_base_vertex",
+ "Disables the use of non-zero base vertex which is unsupported on some platforms.",
+ "https://crbug.com/dawn/343"}},
+ {Toggle::DisableBaseInstance,
+ {"disable_base_instance",
+ "Disables the use of non-zero base instance which is unsupported on some "
+ "platforms.",
+ "https://crbug.com/dawn/343"}},
+ {Toggle::UseD3D12SmallShaderVisibleHeapForTesting,
+ {"use_d3d12_small_shader_visible_heap",
+ "Enable use of a small D3D12 shader visible heap, instead of using a large one by "
+ "default. This setting is used to test bindgroup encoding.",
+ "https://crbug.com/dawn/155"}},
+ {Toggle::UseDXC,
+ {"use_dxc", "Use DXC instead of FXC for compiling HLSL",
+ "https://crbug.com/dawn/402"}},
+ {Toggle::DisableRobustness,
+ {"disable_robustness", "Disable robust buffer access", "https://crbug.com/dawn/480"}},
+ {Toggle::LazyClearBufferOnFirstUse,
+ {"lazy_clear_buffer_on_first_use",
+ "Clear buffers on their first use. This is a temporary toggle only for the "
+ "development of buffer lazy initialization and will be removed after buffer lazy "
+ "initialization is completely implemented.",
+ "https://crbug.com/dawn/414"}},
+ {Toggle::MetalEnableVertexPulling,
+ {"metal_enable_vertex_pulling",
+           "Uses vertex pulling to protect against out-of-bounds reads on Metal",
+ "https://crbug.com/dawn/480"}}}};
} // anonymous namespace
diff --git a/chromium/third_party/dawn/src/dawn_native/Toggles.h b/chromium/third_party/dawn/src/dawn_native/Toggles.h
index 18a26a9a63d..5186cb56394 100644
--- a/chromium/third_party/dawn/src/dawn_native/Toggles.h
+++ b/chromium/third_party/dawn/src/dawn_native/Toggles.h
@@ -42,6 +42,9 @@ namespace dawn_native {
DisableBaseInstance,
UseD3D12SmallShaderVisibleHeapForTesting,
UseDXC,
+ DisableRobustness,
+ LazyClearBufferOnFirstUse,
+ MetalEnableVertexPulling,
EnumCount,
InvalidEnum = EnumCount,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
index 68fa27c2e5e..364e6c13e8f 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/AdapterD3D12.cpp
@@ -102,6 +102,9 @@ namespace dawn_native { namespace d3d12 {
mSupportedExtensions.EnableExtension(Extension::TextureCompressionBC);
mSupportedExtensions.EnableExtension(Extension::PipelineStatisticsQuery);
mSupportedExtensions.EnableExtension(Extension::TimestampQuery);
+ if (mDeviceInfo.supportsShaderFloat16 && GetBackend()->GetFunctions()->IsDXCAvailable()) {
+ mSupportedExtensions.EnableExtension(Extension::ShaderFloat16);
+ }
}
MaybeError Adapter::InitializeDebugLayerFilters() {
@@ -141,12 +144,13 @@ namespace dawn_native { namespace d3d12 {
//
            // Remove after the warnings have been addressed
- // https://crbug.com/dawn/419
- D3D12_MESSAGE_ID_UNMAP_RANGE_NOT_EMPTY,
-
-            // Remove after the warnings have been addressed
// https://crbug.com/dawn/421
D3D12_MESSAGE_ID_GPU_BASED_VALIDATION_INCOMPATIBLE_RESOURCE_STATE,
+
+ // For small placed resource alignment, we first request the small alignment, which may
+            // get rejected and generate a debug error. Then, we request 0 to get the allowed
+            // alignment.
+ D3D12_MESSAGE_ID_CREATERESOURCE_INVALIDALIGNMENT,
};
// Create a retrieval filter with a deny list to suppress messages.
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
index 46dc1b67921..946ce7bd98a 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.cpp
@@ -26,7 +26,8 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<ComPtr<IDXGIFactory4>> CreateFactory(const PlatformFunctions* functions,
bool enableBackendValidation,
- bool beginCaptureOnStartup) {
+ bool beginCaptureOnStartup,
+ bool enableGPUBasedBackendValidation) {
ComPtr<IDXGIFactory4> factory;
uint32_t dxgiFactoryFlags = 0;
@@ -39,7 +40,8 @@ namespace dawn_native { namespace d3d12 {
functions->d3d12GetDebugInterface(IID_PPV_ARGS(&debugController)))) {
ASSERT(debugController != nullptr);
debugController->EnableDebugLayer();
- debugController->SetEnableGPUBasedValidation(true);
+ debugController->SetEnableGPUBasedValidation(
+ enableGPUBasedBackendValidation);
// Enable additional debug layers.
dxgiFactoryFlags |= DXGI_CREATE_FACTORY_DEBUG;
@@ -70,6 +72,18 @@ namespace dawn_native { namespace d3d12 {
return std::move(factory);
}
+ ResultOrError<std::unique_ptr<AdapterBase>> CreateAdapterFromIDXGIAdapter(
+ Backend* backend,
+ ComPtr<IDXGIAdapter> dxgiAdapter) {
+ ComPtr<IDXGIAdapter3> dxgiAdapter3;
+ DAWN_TRY(CheckHRESULT(dxgiAdapter.As(&dxgiAdapter3), "DXGIAdapter retrieval"));
+ std::unique_ptr<Adapter> adapter =
+ std::make_unique<Adapter>(backend, std::move(dxgiAdapter3));
+ DAWN_TRY(adapter->Initialize());
+
+ return {std::move(adapter)};
+ }
+
} // anonymous namespace
Backend::Backend(InstanceBase* instance)
@@ -84,7 +98,8 @@ namespace dawn_native { namespace d3d12 {
DAWN_TRY_ASSIGN(mFactory,
CreateFactory(mFunctions.get(), instance->IsBackendValidationEnabled(),
- instance->IsBeginCaptureOnStartupEnabled()));
+ instance->IsBeginCaptureOnStartupEnabled(),
+ instance->IsGPUBasedBackendValidationEnabled()));
return {};
}
@@ -127,23 +142,34 @@ namespace dawn_native { namespace d3d12 {
}
ASSERT(dxgiAdapter != nullptr);
-
- ComPtr<IDXGIAdapter3> dxgiAdapter3;
- HRESULT result = dxgiAdapter.As(&dxgiAdapter3);
- ASSERT(SUCCEEDED(result));
-
- std::unique_ptr<Adapter> adapter =
- std::make_unique<Adapter>(this, std::move(dxgiAdapter3));
- if (GetInstance()->ConsumedError(adapter->Initialize())) {
+ ResultOrError<std::unique_ptr<AdapterBase>> adapter =
+ CreateAdapterFromIDXGIAdapter(this, dxgiAdapter);
+ if (adapter.IsError()) {
+ adapter.AcquireError();
continue;
}
- adapters.push_back(std::move(adapter));
+ adapters.push_back(std::move(adapter.AcquireSuccess()));
}
return adapters;
}
+ ResultOrError<std::vector<std::unique_ptr<AdapterBase>>> Backend::DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) {
+ ASSERT(optionsBase->backendType == WGPUBackendType_D3D12);
+ const AdapterDiscoveryOptions* options =
+ static_cast<const AdapterDiscoveryOptions*>(optionsBase);
+
+ ASSERT(options->dxgiAdapter != nullptr);
+
+ std::unique_ptr<AdapterBase> adapter;
+ DAWN_TRY_ASSIGN(adapter, CreateAdapterFromIDXGIAdapter(this, options->dxgiAdapter));
+ std::vector<std::unique_ptr<AdapterBase>> adapters;
+ adapters.push_back(std::move(adapter));
+ return std::move(adapters);
+ }
+
BackendConnection* Connect(InstanceBase* instance) {
Backend* backend = new Backend(instance);
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h
index 27ef1d16a36..87c2d13f04b 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BackendD3D12.h
@@ -35,6 +35,8 @@ namespace dawn_native { namespace d3d12 {
const PlatformFunctions* GetFunctions() const;
std::vector<std::unique_ptr<AdapterBase>> DiscoverDefaultAdapters() override;
+ ResultOrError<std::vector<std::unique_ptr<AdapterBase>>> DiscoverAdapters(
+ const AdapterDiscoveryOptionsBase* optionsBase) override;
private:
// Keep mFunctions as the first member so that in the destructor it is freed last. Otherwise
@@ -47,4 +49,4 @@ namespace dawn_native { namespace d3d12 {
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_BACKENDD3D12_H_ \ No newline at end of file
+#endif // DAWNNATIVE_D3D12_BACKENDD3D12_H_
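
Together with CreateAdapterFromIDXGIAdapter, the new DiscoverAdapters override lets an embedder hand Dawn one specific IDXGIAdapter instead of enumerating them all. A hedged usage sketch; the exact shape of dawn_native::d3d12::AdapterDiscoveryOptions is assumed from the options->dxgiAdapter field referenced in the .cpp hunk above:

    #include <utility>

    #include <dawn_native/D3D12Backend.h>
    #include <dawn_native/DawnNative.h>

    // Sketch only: assumes AdapterDiscoveryOptions carries the
    // ComPtr<IDXGIAdapter> that DiscoverAdapters reads as options->dxgiAdapter,
    // and that Instance::DiscoverAdapters accepts it through the base pointer.
    void DiscoverSpecificAdapter(dawn_native::Instance* instance,
                                 Microsoft::WRL::ComPtr<IDXGIAdapter> dxgiAdapter) {
        dawn_native::d3d12::AdapterDiscoveryOptions options;
        options.dxgiAdapter = std::move(dxgiAdapter);
        instance->DiscoverAdapters(&options);  // backendType is WGPUBackendType_D3D12
    }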
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
index 76fb0289796..21d2743e94f 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupD3D12.cpp
@@ -88,7 +88,7 @@ namespace dawn_native { namespace d3d12 {
desc.Buffer.Flags = D3D12_BUFFER_UAV_FLAG_RAW;
d3d12Device->CreateUnorderedAccessView(
- ToBackend(binding.buffer)->GetD3D12Resource().Get(), nullptr, &desc,
+ ToBackend(binding.buffer)->GetD3D12Resource(), nullptr, &desc,
viewAllocation.OffsetFrom(viewSizeIncrement, bindingOffsets[bindingIndex]));
break;
}
@@ -108,7 +108,7 @@ namespace dawn_native { namespace d3d12 {
desc.Buffer.StructureByteStride = 0;
desc.Buffer.Flags = D3D12_BUFFER_SRV_FLAG_RAW;
d3d12Device->CreateShaderResourceView(
- ToBackend(binding.buffer)->GetD3D12Resource().Get(), &desc,
+ ToBackend(binding.buffer)->GetD3D12Resource(), &desc,
viewAllocation.OffsetFrom(viewSizeIncrement, bindingOffsets[bindingIndex]));
break;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
index 9280f8c5f4f..62722f6bd6d 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.cpp
@@ -46,6 +46,7 @@ namespace dawn_native { namespace d3d12 {
BindGroupLayout::BindGroupLayout(Device* device, const BindGroupLayoutDescriptor* descriptor)
: BindGroupLayoutBase(device, descriptor),
+ mBindingOffsets(GetBindingCount()),
mDescriptorCounts{},
mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
for (BindingIndex bindingIndex = GetDynamicBufferCount(); bindingIndex < GetBindingCount();
@@ -170,9 +171,8 @@ namespace dawn_native { namespace d3d12 {
mBindGroupAllocator.Deallocate(bindGroup);
}
- const ityp::array<BindingIndex, uint32_t, kMaxBindingsPerGroup>&
- BindGroupLayout::GetBindingOffsets() const {
- return mBindingOffsets;
+ ityp::span<BindingIndex, const uint32_t> BindGroupLayout::GetBindingOffsets() const {
+ return {mBindingOffsets.data(), mBindingOffsets.size()};
}
uint32_t BindGroupLayout::GetCbvUavSrvDescriptorTableSize() const {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
index 5a5ba8bce33..c5f32f54f3b 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BindGroupLayoutD3D12.h
@@ -18,6 +18,7 @@
#include "dawn_native/BindGroupLayout.h"
#include "common/SlabAllocator.h"
+#include "common/ityp_stack_vec.h"
#include "dawn_native/d3d12/d3d12_platform.h"
namespace dawn_native { namespace d3d12 {
@@ -44,7 +45,7 @@ namespace dawn_native { namespace d3d12 {
Count,
};
- const ityp::array<BindingIndex, uint32_t, kMaxBindingsPerGroup>& GetBindingOffsets() const;
+ ityp::span<BindingIndex, const uint32_t> GetBindingOffsets() const;
uint32_t GetCbvUavSrvDescriptorTableSize() const;
uint32_t GetSamplerDescriptorTableSize() const;
uint32_t GetCbvUavSrvDescriptorCount() const;
@@ -54,7 +55,7 @@ namespace dawn_native { namespace d3d12 {
private:
~BindGroupLayout() override = default;
- ityp::array<BindingIndex, uint32_t, kMaxBindingsPerGroup> mBindingOffsets;
+ ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mBindingOffsets;
std::array<uint32_t, DescriptorType::Count> mDescriptorCounts;
D3D12_DESCRIPTOR_RANGE mRanges[DescriptorType::Count];
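
Switching GetBindingOffsets from a reference to a fixed-size ityp::array to an ityp::span lets the layout keep its offsets in a stack_vec (inline storage up to kMaxOptimalBindingsPerGroup, heap-allocated beyond) without callers seeing the difference. A rough equivalent using standard types; std::span is C++20, and Dawn's ityp wrappers additionally enforce typed indices:

    #include <cstddef>
    #include <cstdint>
    #include <span>
    #include <vector>

    class BindingOffsetsSketch {
      public:
        explicit BindingOffsetsSketch(size_t bindingCount) : mOffsets(bindingCount) {}

        // Callers get a non-owning, read-only view; the storage strategy
        // (inline vs. heap in Dawn's stack_vec) stays an implementation detail.
        std::span<const uint32_t> GetBindingOffsets() const {
            return {mOffsets.data(), mOffsets.size()};
        }

      private:
        std::vector<uint32_t> mOffsets;
    };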
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
index 879709f4bae..66991109990 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.cpp
@@ -17,6 +17,7 @@
#include "common/Assert.h"
#include "common/Constants.h"
#include "common/Math.h"
+#include "dawn_native/CommandBuffer.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/d3d12/CommandRecordingContext.h"
#include "dawn_native/d3d12/D3D12Error.h"
@@ -62,6 +63,13 @@ namespace dawn_native { namespace d3d12 {
if (usage & wgpu::BufferUsage::Indirect) {
resourceState |= D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT;
}
+ if (usage & wgpu::BufferUsage::QueryResolve) {
+ // D3D12_RESOURCE_STATE_COPY_DEST is required by ResolveQueryData but we also add
+ // D3D12_RESOURCE_STATE_UNORDERED_ACCESS because the queries will be post-processed
+ // by a compute shader and written to this buffer via a UAV.
+ resourceState |=
+ (D3D12_RESOURCE_STATE_UNORDERED_ACCESS | D3D12_RESOURCE_STATE_COPY_DEST);
+ }
return resourceState;
}
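
As a standalone restatement of the mapping this hunk extends, each buffer-usage bit ORs the D3D12 states the buffer must be able to enter into one mask; the helper name below is hypothetical and the includes illustrative:

    #include <d3d12.h>
    #include <webgpu/webgpu_cpp.h>

    D3D12_RESOURCE_STATES QueryResolveStatesSketch(wgpu::BufferUsage usage) {
        D3D12_RESOURCE_STATES state = D3D12_RESOURCE_STATE_COMMON;
        if (usage & wgpu::BufferUsage::QueryResolve) {
            // COPY_DEST is what ResolveQueryData writes through; UNORDERED_ACCESS
            // covers the compute pass that later post-processes the results.
            state |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS |
                     D3D12_RESOURCE_STATE_COPY_DEST;
        }
        return state;
    }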
@@ -123,7 +131,11 @@ namespace dawn_native { namespace d3d12 {
ToBackend(GetDevice())->AllocateMemory(heapType, resourceDescriptor, bufferUsage));
if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- DAWN_TRY(ClearBuffer(ClearValue::NonZero));
+ CommandRecordingContext* commandRecordingContext;
+ DAWN_TRY_ASSIGN(commandRecordingContext,
+ ToBackend(GetDevice())->GetPendingCommandContext());
+
+ DAWN_TRY(ClearBuffer(commandRecordingContext, uint8_t(1u)));
}
return {};
@@ -133,7 +145,7 @@ namespace dawn_native { namespace d3d12 {
DestroyInternal();
}
- ComPtr<ID3D12Resource> Buffer::GetD3D12Resource() const {
+ ID3D12Resource* Buffer::GetD3D12Resource() const {
return mResourceAllocation.GetD3D12Resource();
}
@@ -183,7 +195,7 @@ namespace dawn_native { namespace d3d12 {
if (needsUAVBarrier) {
barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_UAV;
barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
- barrier->UAV.pResource = GetD3D12Resource().Get();
+ barrier->UAV.pResource = GetD3D12Resource();
mLastUsage = newUsage;
return true;
@@ -227,7 +239,7 @@ namespace dawn_native { namespace d3d12 {
barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
- barrier->Transition.pResource = GetD3D12Resource().Get();
+ barrier->Transition.pResource = GetD3D12Resource();
barrier->Transition.StateBefore = lastState;
barrier->Transition.StateAfter = newState;
barrier->Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
@@ -239,79 +251,88 @@ namespace dawn_native { namespace d3d12 {
return mResourceAllocation.GetGPUPointer();
}
- bool Buffer::IsMapWritable() const {
+ bool Buffer::IsMappableAtCreation() const {
// TODO(enga): Handle CPU-visible memory on UMA
return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
}
- MaybeError Buffer::MapBufferInternal(D3D12_RANGE mappedRange,
- void** mappedPointer,
- const char* contextInfo) {
+ MaybeError Buffer::MapInternal(bool isWrite,
+ size_t offset,
+ size_t size,
+ const char* contextInfo) {
// The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
// evicted. This buffer should already have been made resident when it was created.
Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockAllocation(heap));
- DAWN_TRY(
- CheckHRESULT(GetD3D12Resource()->Map(0, &mappedRange, mappedPointer), contextInfo));
+ D3D12_RANGE range = {offset, offset + size};
+ // mMappedData is the pointer to the start of the resource, irrespective of offset.
+ // MSDN says (note the weird use of "never"):
+ //
+ // When ppData is not NULL, the pointer returned is never offset by any values in
+ // pReadRange.
+ //
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12resource-map
+ DAWN_TRY(CheckHRESULT(GetD3D12Resource()->Map(0, &range, &mMappedData), contextInfo));
+
+ if (isWrite) {
+ mWrittenMappedRange = range;
+ }
+
return {};
}
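
Because Map() returns the unoffset resource base (per the MSDN note quoted above), a caller that mapped only [offset, offset + size) still derives its pointer by adding the offset back. A hypothetical illustration, not part of the diff:

    #include <cstddef>
    #include <cstdint>

    // How a user-visible pointer into a mapped sub-range is computed from
    // the base pointer Map() returned.
    inline void* PointerIntoMappedRange(void* resourceBase, size_t offset) {
        return static_cast<uint8_t*>(resourceBase) + offset;
    }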
- void Buffer::UnmapBufferInternal(D3D12_RANGE mappedRange) {
- GetD3D12Resource()->Unmap(0, &mappedRange);
-
- // When buffers are mapped, they are locked to keep them in resident memory. We must unlock
- // them when they are unmapped.
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- ToBackend(GetDevice())->GetResidencyManager()->UnlockAllocation(heap);
- }
+ MaybeError Buffer::MapAtCreationImpl() {
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, ToBackend(GetDevice())->GetPendingCommandContext());
+ DAWN_TRY(EnsureDataInitialized(commandContext));
- MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
- mWrittenMappedRange = {0, static_cast<size_t>(GetSize())};
- DAWN_TRY(MapBufferInternal(mWrittenMappedRange, reinterpret_cast<void**>(mappedPointer),
- "D3D12 map at creation"));
- mMappedData = reinterpret_cast<char*>(mappedPointer);
+ // Set isMapWrite to false on MapRead buffers to silence a D3D12 debug layer warning.
+ bool isMapWrite = (GetUsage() & wgpu::BufferUsage::MapWrite) != 0;
+ DAWN_TRY(MapInternal(isMapWrite, 0, size_t(GetSize()), "D3D12 map at creation"));
return {};
}
- MaybeError Buffer::MapReadAsyncImpl(uint32_t serial) {
- mWrittenMappedRange = {};
- D3D12_RANGE readRange = {0, static_cast<size_t>(GetSize())};
- DAWN_TRY(MapBufferInternal(readRange, reinterpret_cast<void**>(&mMappedData),
- "D3D12 map read async"));
+ MaybeError Buffer::MapReadAsyncImpl() {
+ return MapInternal(false, 0, size_t(GetSize()), "D3D12 map read async");
+ }
- // There is no need to transition the resource to a new state: D3D12 seems to make the GPU
- // writes available when the fence is passed.
- return {};
+ MaybeError Buffer::MapWriteAsyncImpl() {
+ return MapInternal(true, 0, size_t(GetSize()), "D3D12 map write async");
}
- MaybeError Buffer::MapWriteAsyncImpl(uint32_t serial) {
- mWrittenMappedRange = {0, static_cast<size_t>(GetSize())};
- DAWN_TRY(MapBufferInternal(mWrittenMappedRange, reinterpret_cast<void**>(&mMappedData),
- "D3D12 map write async"));
+ MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, ToBackend(GetDevice())->GetPendingCommandContext());
+ DAWN_TRY(EnsureDataInitialized(commandContext));
- // There is no need to transition the resource to a new state: D3D12 seems to make the CPU
- // writes available on queue submission.
- return {};
+ return MapInternal(mode & wgpu::MapMode::Write, offset, size, "D3D12 map async");
}
void Buffer::UnmapImpl() {
- UnmapBufferInternal(mWrittenMappedRange);
-
- mWrittenMappedRange = {};
+ GetD3D12Resource()->Unmap(0, &mWrittenMappedRange);
mMappedData = nullptr;
+ mWrittenMappedRange = {0, 0};
+
+ // When buffers are mapped, they are locked to keep them in resident memory. We must unlock
+ // them when they are unmapped.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ ToBackend(GetDevice())->GetResidencyManager()->UnlockAllocation(heap);
}
void* Buffer::GetMappedPointerImpl() {
+ // The frontend expects the returned pointer to be the start of the resource,
+ // irrespective of the offset passed to MapAsyncImpl, which is exactly what
+ // mMappedData holds.
return mMappedData;
}
void Buffer::DestroyImpl() {
- // We must ensure that if a mapped buffer is destroyed, it does not leave a dangling lock
- // reference on its heap.
- if (IsMapped()) {
- Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
- ToBackend(GetDevice())->GetResidencyManager()->UnlockAllocation(heap);
+ if (mMappedData != nullptr) {
+ // If the buffer is currently mapped, unmap without flushing the writes to the GPU
+ // since the buffer cannot be used anymore. UnmapImpl checks mWrittenMappedRange to
+ // know which parts to flush, so we set it to an empty range to prevent flushes.
+ mWrittenMappedRange = {0, 0};
+ UnmapImpl();
}
ToBackend(GetDevice())->DeallocateMemory(mResourceAllocation);
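
The empty written range set by DestroyImpl above relies on a documented Unmap() behavior: a range with Begin == End tells the driver the CPU wrote nothing, so no caches need flushing. A minimal sketch, assuming resource is a currently mapped buffer:

    #include <d3d12.h>

    void UnmapWithoutFlush(ID3D12Resource* resource) {
        D3D12_RANGE emptyWrittenRange = {0, 0};  // Begin == End: nothing written
        resource->Unmap(0, &emptyWrittenRange);
    }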
@@ -326,37 +347,91 @@ namespace dawn_native { namespace d3d12 {
return mResourceAllocation.GetInfo().mMethod == allocationMethod;
}
- MaybeError Buffer::ClearBuffer(ClearValue clearValue) {
- // TODO(jiawei.shao@intel.com): support buffer lazy-initialization to 0.
- ASSERT(clearValue == BufferBase::ClearValue::NonZero);
- constexpr uint8_t kClearBufferValue = 1u;
+ MaybeError Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
+ // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+ // instead when buffer lazy initialization is completely supported.
+ if (IsDataInitialized() ||
+ !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+ return {};
+ }
+
+ DAWN_TRY(InitializeToZero(commandContext));
+
+ return {};
+ }
+ MaybeError Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ uint64_t offset,
+ uint64_t size) {
+ // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+ // instead when buffer lazy initialization is completely supported.
+ if (IsDataInitialized() ||
+ !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+ return {};
+ }
+
+ if (IsFullBufferRange(offset, size)) {
+ SetIsDataInitialized();
+ } else {
+ DAWN_TRY(InitializeToZero(commandContext));
+ }
+
+ return {};
+ }
+
+ MaybeError Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ const CopyTextureToBufferCmd* copy) {
+ // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+ // instead when buffer lazy initialization is completely supported.
+ if (IsDataInitialized() ||
+ !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+ return {};
+ }
+
+ if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+ SetIsDataInitialized();
+ } else {
+ DAWN_TRY(InitializeToZero(commandContext));
+ }
+
+ return {};
+ }
+
+ MaybeError Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
+ ASSERT(GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse));
+ ASSERT(!IsDataInitialized());
+
+ // TODO(jiawei.shao@intel.com): skip initializing the buffer when it is created on a heap
+ // that has already been zero initialized.
+ DAWN_TRY(ClearBuffer(commandContext, uint8_t(0u)));
+ SetIsDataInitialized();
+ GetDevice()->IncrementLazyClearCountForTesting();
+
+ return {};
+ }
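
The three EnsureDataInitialized* entry points above share one policy, condensed here into a hypothetical predicate: zero the buffer before first use unless it is already initialized, lazy clearing is disabled, or the pending write defines every byte anyway:

    // Returns true when an InitializeToZero() call is needed.
    bool NeedsZeroInit(bool dataInitialized, bool lazyClearEnabled,
                       bool writeCoversWholeBuffer) {
        if (dataInitialized || !lazyClearEnabled) {
            return false;
        }
        // A full overwrite makes the clear redundant.
        return !writeCoversWholeBuffer;
    }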
+
+ MaybeError Buffer::ClearBuffer(CommandRecordingContext* commandContext, uint8_t clearValue) {
Device* device = ToBackend(GetDevice());
// Buffers on the UPLOAD heap must always stay in the GENERIC_READ state, so the
// only way to clear such a buffer is through a mapped write.
if (D3D12HeapType(GetUsage()) == D3D12_HEAP_TYPE_UPLOAD) {
- uint8_t* mappedData = nullptr;
- D3D12_RANGE writeRange = {0, static_cast<size_t>(GetSize())};
- DAWN_TRY(MapBufferInternal(writeRange, reinterpret_cast<void**>(&mappedData),
- "D3D12 map at clear buffer"));
-
- memset(mappedData, kClearBufferValue, GetSize());
-
- UnmapBufferInternal(writeRange);
- mappedData = nullptr;
+ DAWN_TRY(MapInternal(true, 0, size_t(GetSize()), "D3D12 map at clear buffer"));
+ memset(mMappedData, clearValue, GetSize());
+ UnmapImpl();
} else {
// TODO(jiawei.shao@intel.com): use ClearUnorderedAccessView*() when the buffer usage
// includes STORAGE.
DynamicUploader* uploader = device->GetDynamicUploader();
UploadHandle uploadHandle;
DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(GetSize(), device->GetPendingCommandSerial()));
+ uploader->Allocate(GetSize(), device->GetPendingCommandSerial(),
+ kCopyBufferToBufferOffsetAlignment));
- memset(uploadHandle.mappedBuffer, kClearBufferValue, GetSize());
+ memset(uploadHandle.mappedBuffer, clearValue, GetSize());
- DAWN_TRY(device->CopyFromStagingToBuffer(uploadHandle.stagingBuffer,
- uploadHandle.startOffset, this, 0, GetSize()));
+ device->CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
+ uploadHandle.startOffset, this, 0, GetSize());
}
return {};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
index 3c51b743946..a2af099a79e 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/BufferD3D12.h
@@ -32,7 +32,7 @@ namespace dawn_native { namespace d3d12 {
MaybeError Initialize();
- ComPtr<ID3D12Resource> GetD3D12Resource() const;
+ ID3D12Resource* GetD3D12Resource() const;
D3D12_GPU_VIRTUAL_ADDRESS GetVA() const;
bool TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
@@ -44,34 +44,41 @@ namespace dawn_native { namespace d3d12 {
bool CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const;
bool CheckIsResidentForTesting() const;
+ MaybeError EnsureDataInitialized(CommandRecordingContext* commandContext);
+ MaybeError EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ uint64_t offset,
+ uint64_t size);
+ MaybeError EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ const CopyTextureToBufferCmd* copy);
+
private:
~Buffer() override;
// Dawn API
- MaybeError MapReadAsyncImpl(uint32_t serial) override;
- MaybeError MapWriteAsyncImpl(uint32_t serial) override;
+ MaybeError MapReadAsyncImpl() override;
+ MaybeError MapWriteAsyncImpl() override;
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
void UnmapImpl() override;
void DestroyImpl() override;
- bool IsMapWritable() const override;
- virtual MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
+ bool IsMappableAtCreation() const override;
+ virtual MaybeError MapAtCreationImpl() override;
void* GetMappedPointerImpl() override;
- MaybeError MapBufferInternal(D3D12_RANGE mappedRange,
- void** mappedPointer,
- const char* contextInfo);
- void UnmapBufferInternal(D3D12_RANGE mappedRange);
+ MaybeError MapInternal(bool isWrite, size_t start, size_t end, const char* contextInfo);
bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
D3D12_RESOURCE_BARRIER* barrier,
wgpu::BufferUsage newUsage);
- MaybeError ClearBuffer(ClearValue clearValue);
+ MaybeError InitializeToZero(CommandRecordingContext* commandContext);
+ MaybeError ClearBuffer(CommandRecordingContext* commandContext, uint8_t clearValue);
ResourceHeapAllocation mResourceAllocation;
bool mFixedResourceState = false;
wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
Serial mLastUsedSerial = UINT64_MAX;
- D3D12_RANGE mWrittenMappedRange;
- char* mMappedData = nullptr;
+
+ D3D12_RANGE mWrittenMappedRange = {0, 0};
+ void* mMappedData = nullptr;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
index 021c47ab87c..d92398584d9 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
@@ -50,4 +50,4 @@ namespace dawn_native { namespace d3d12 {
mBaseDescriptor = {0};
}
-}} // namespace dawn_native::d3d12 \ No newline at end of file
+}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h
index 05aaf5185bd..51ae2fdb61f 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CPUDescriptorHeapAllocationD3D12.h
@@ -44,4 +44,4 @@ namespace dawn_native { namespace d3d12 {
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_ \ No newline at end of file
+#endif // DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
index a0df3b68730..8ca8fc27168 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.cpp
@@ -19,6 +19,7 @@
#include "dawn_native/CommandEncoder.h"
#include "dawn_native/CommandValidation.h"
#include "dawn_native/Commands.h"
+#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/RenderBundle.h"
#include "dawn_native/d3d12/BindGroupD3D12.h"
#include "dawn_native/d3d12/BindGroupLayoutD3D12.h"
@@ -28,6 +29,7 @@
#include "dawn_native/d3d12/DeviceD3D12.h"
#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
#include "dawn_native/d3d12/PlatformFunctions.h"
+#include "dawn_native/d3d12/QuerySetD3D12.h"
#include "dawn_native/d3d12/RenderPassBuilderD3D12.h"
#include "dawn_native/d3d12/RenderPipelineD3D12.h"
#include "dawn_native/d3d12/SamplerD3D12.h"
@@ -55,6 +57,19 @@ namespace dawn_native { namespace d3d12 {
}
}
+ D3D12_QUERY_TYPE D3D12QueryType(wgpu::QueryType type) {
+ switch (type) {
+ case wgpu::QueryType::Occlusion:
+ return D3D12_QUERY_TYPE_OCCLUSION;
+ case wgpu::QueryType::PipelineStatistics:
+ return D3D12_QUERY_TYPE_PIPELINE_STATISTICS;
+ case wgpu::QueryType::Timestamp:
+ return D3D12_QUERY_TYPE_TIMESTAMP;
+ default:
+ UNREACHABLE();
+ }
+ }
+
bool CanUseCopyResource(const Texture* src, const Texture* dst, const Extent3D& copySize) {
// Checked by validation
ASSERT(src->GetSampleCount() == dst->GetSampleCount());
@@ -80,9 +95,51 @@ namespace dawn_native { namespace d3d12 {
copySize.depth == srcSize.depth;
}
+ void RecordCopyTextureToBufferFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
+ const Texture2DCopySplit& baseCopySplit,
+ Buffer* buffer,
+ uint64_t baseOffset,
+ uint64_t bufferBytesPerRow,
+ Texture* texture,
+ uint32_t textureMiplevel,
+ uint32_t textureSlice,
+ Aspect aspect) {
+ const D3D12_TEXTURE_COPY_LOCATION textureLocation =
+ ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureSlice,
+ aspect);
+
+ const uint64_t offset = baseCopySplit.offset + baseOffset;
+
+ for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
+ const Texture2DCopySplit::CopyInfo& info = baseCopySplit.copies[i];
+
+ // TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
+ // members in Texture2DCopySplit::CopyInfo.
+ const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
+ ComputeBufferLocationForCopyTextureRegion(texture, buffer->GetD3D12Resource(),
+ info.bufferSize, offset,
+ bufferBytesPerRow, aspect);
+ const D3D12_BOX sourceRegion =
+ ComputeD3D12BoxFromOffsetAndSize(info.textureOffset, info.copySize);
+
+ commandList->CopyTextureRegion(&bufferLocation, info.bufferOffset.x,
+ info.bufferOffset.y, info.bufferOffset.z,
+ &textureLocation, &sourceRegion);
+ }
+ }
+
+ void RecordWriteTimestampCmd(ID3D12GraphicsCommandList* commandList,
+ WriteTimestampCmd* cmd) {
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
+ ASSERT(D3D12QueryType(querySet->GetQueryType()) == D3D12_QUERY_TYPE_TIMESTAMP);
+ commandList->EndQuery(querySet->GetQueryHeap(), D3D12_QUERY_TYPE_TIMESTAMP,
+ cmd->queryIndex);
+ }
} // anonymous namespace
class BindGroupStateTracker : public BindGroupAndStorageBarrierTrackerBase<false, uint64_t> {
+ using Base = BindGroupAndStorageBarrierTrackerBase;
+
public:
BindGroupStateTracker(Device* device)
: BindGroupAndStorageBarrierTrackerBase(),
@@ -95,6 +152,17 @@ namespace dawn_native { namespace d3d12 {
mInCompute = inCompute_;
}
+ void OnSetPipeline(PipelineBase* pipeline) {
+ // Invalidate the root sampler tables previously set in the root signature.
+ // This is because changing the pipeline layout also changes the root signature.
+ const PipelineLayout* pipelineLayout = ToBackend(pipeline->GetLayout());
+ if (mLastAppliedPipelineLayout != pipelineLayout) {
+ mBoundRootSamplerTables = {};
+ }
+
+ Base::OnSetPipeline(pipeline);
+ }
+
MaybeError Apply(CommandRecordingContext* commandContext) {
// Bindgroups are allocated in shader-visible descriptor heaps which are managed by a
// ringbuffer. There can be a single shader-visible descriptor heap of each type bound
@@ -309,10 +377,16 @@ namespace dawn_native { namespace d3d12 {
uint32_t parameterIndex = pipelineLayout->GetSamplerRootParameterIndex(index);
const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor =
group->GetBaseSamplerDescriptor();
- if (mInCompute) {
- commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
- } else {
- commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
+ // Check if the group requires its sampler table to be set in the pipeline.
+ // This is because sampler heap allocations can be cached, so different groups
+ // may end up using the same table.
+ if (mBoundRootSamplerTables[index].ptr != baseDescriptor.ptr) {
+ if (mInCompute) {
+ commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
+ } else {
+ commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
+ }
+
+ mBoundRootSamplerTables[index] = baseDescriptor;
}
}
}
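
The redundancy check added above boils down to comparing GPU descriptor handles before touching the root signature; a minimal restatement (the helper is hypothetical):

    #include <d3d12.h>

    // Sampler-heap allocations can be cached, so two bind groups may resolve
    // to the same descriptor table; re-binding is only needed on a change.
    bool NeedsRebind(D3D12_GPU_DESCRIPTOR_HANDLE bound,
                     D3D12_GPU_DESCRIPTOR_HANDLE wanted) {
        return bound.ptr != wanted.ptr;
    }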
@@ -321,6 +395,9 @@ namespace dawn_native { namespace d3d12 {
bool mInCompute = false;
+ ityp::array<BindGroupIndex, D3D12_GPU_DESCRIPTOR_HANDLE, kMaxBindGroups>
+ mBoundRootSamplerTables = {};
+
ShaderVisibleDescriptorAllocator* mViewAllocator;
ShaderVisibleDescriptorAllocator* mSamplerAllocator;
};
@@ -446,7 +523,8 @@ namespace dawn_native { namespace d3d12 {
ID3D12Resource* colorTextureHandle = colorTexture->GetD3D12Resource();
ID3D12Resource* resolveTextureHandle = resolveTexture->GetD3D12Resource();
const uint32_t resolveTextureSubresourceIndex = resolveTexture->GetSubresourceIndex(
- resolveTarget->GetBaseMipLevel(), resolveTarget->GetBaseArrayLayer());
+ resolveTarget->GetBaseMipLevel(), resolveTarget->GetBaseArrayLayer(),
+ Aspect::Color);
constexpr uint32_t kColorTextureSubresourceIndex = 0;
commandContext->GetCommandList()->ResolveSubresource(
resolveTextureHandle, resolveTextureSubresourceIndex, colorTextureHandle,
@@ -457,11 +535,7 @@ namespace dawn_native { namespace d3d12 {
} // anonymous namespace
CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
- : CommandBufferBase(encoder, descriptor), mCommands(encoder->AcquireCommands()) {
- }
-
- CommandBuffer::~CommandBuffer() {
- FreeCommands(&mCommands);
+ : CommandBufferBase(encoder, descriptor) {
}
MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext) {
@@ -484,10 +558,15 @@ namespace dawn_native { namespace d3d12 {
wgpu::BufferUsage bufferUsages = wgpu::BufferUsage::None;
for (size_t i = 0; i < usages.buffers.size(); ++i) {
+ Buffer* buffer = ToBackend(usages.buffers[i]);
+
+ // TODO(jiawei.shao@intel.com): clear storage buffers with
+ // ClearUnorderedAccessView*().
+ buffer->GetDevice()->ConsumedError(buffer->EnsureDataInitialized(commandContext));
+
D3D12_RESOURCE_BARRIER barrier;
- if (ToBackend(usages.buffers[i])
- ->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
- usages.bufferUsages[i])) {
+ if (buffer->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
+ usages.bufferUsages[i])) {
barriers.push_back(barrier);
}
bufferUsages |= usages.bufferUsages[i];
@@ -560,14 +639,18 @@ namespace dawn_native { namespace d3d12 {
Buffer* srcBuffer = ToBackend(copy->source.Get());
Buffer* dstBuffer = ToBackend(copy->destination.Get());
+ DAWN_TRY(srcBuffer->EnsureDataInitialized(commandContext));
+ DAWN_TRY(dstBuffer->EnsureDataInitializedAsDestination(
+ commandContext, copy->destinationOffset, copy->size));
+
srcBuffer->TrackUsageAndTransitionNow(commandContext,
wgpu::BufferUsage::CopySrc);
dstBuffer->TrackUsageAndTransitionNow(commandContext,
wgpu::BufferUsage::CopyDst);
commandList->CopyBufferRegion(
- dstBuffer->GetD3D12Resource().Get(), copy->destinationOffset,
- srcBuffer->GetD3D12Resource().Get(), copy->sourceOffset, copy->size);
+ dstBuffer->GetD3D12Resource(), copy->destinationOffset,
+ srcBuffer->GetD3D12Resource(), copy->sourceOffset, copy->size);
break;
}
@@ -576,13 +659,12 @@ namespace dawn_native { namespace d3d12 {
Buffer* buffer = ToBackend(copy->source.buffer.Get());
Texture* texture = ToBackend(copy->destination.texture.Get());
+ DAWN_TRY(buffer->EnsureDataInitialized(commandContext));
+
ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- // TODO(jiawei.shao@intel.com): use copy->destination.origin.z instead of
- // copy->destination.arrayLayer once GPUTextureCopyView.arrayLayer to
- // GPUTextureCopyView.origin.z is done.
- SubresourceRange subresources = {copy->destination.mipLevel, 1,
- copy->destination.arrayLayer,
- copy->copySize.depth};
+ SubresourceRange subresources =
+ GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
+
if (IsCompleteSubresourceCopiedTo(texture, copy->copySize,
copy->destination.mipLevel)) {
texture->SetIsSubresourceContentInitialized(true, subresources);
@@ -594,43 +676,12 @@ namespace dawn_native { namespace d3d12 {
texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst,
subresources);
- const uint64_t bytesPerSlice =
- copy->source.bytesPerRow * copy->source.rowsPerImage;
-
- const dawn_native::Extent3D copyOneLayerSize = {copy->copySize.width,
- copy->copySize.height, 1};
- uint64_t bufferOffsetForNextSlice = 0;
- for (uint32_t copySlice = copy->destination.arrayLayer;
- copySlice < copy->destination.arrayLayer + copy->copySize.depth;
- ++copySlice) {
- // TODO(jiawei.shao@intel.com): compute copySplit once for all texture array
- // layers when possible.
- auto copySplit = ComputeTextureCopySplit(
- copy->destination.origin, copyOneLayerSize, texture->GetFormat(),
- bufferOffsetForNextSlice + copy->source.offset,
- copy->source.bytesPerRow, copy->source.rowsPerImage);
-
- D3D12_TEXTURE_COPY_LOCATION textureLocation =
- ComputeTextureCopyLocationForTexture(
- texture, copy->destination.mipLevel, copySlice);
-
- for (uint32_t i = 0; i < copySplit.count; ++i) {
- const TextureCopySplit::CopyInfo& info = copySplit.copies[i];
-
- D3D12_TEXTURE_COPY_LOCATION bufferLocation =
- ComputeBufferLocationForCopyTextureRegion(
- texture, buffer->GetD3D12Resource().Get(), info.bufferSize,
- copySplit.offset, copy->source.bytesPerRow);
- D3D12_BOX sourceRegion =
- ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
-
- commandList->CopyTextureRegion(
- &textureLocation, info.textureOffset.x, info.textureOffset.y,
- info.textureOffset.z, &bufferLocation, &sourceRegion);
- }
+ // Compute the copy splits and record the CopyTextureRegion commands.
+ CopyBufferToTextureWithCopySplit(
+ commandContext, copy->destination, copy->copySize, texture,
+ buffer->GetD3D12Resource(), copy->source.offset, copy->source.bytesPerRow,
+ copy->source.rowsPerImage, subresources.aspects);
- bufferOffsetForNextSlice += bytesPerSlice;
- }
break;
}
@@ -639,55 +690,57 @@ namespace dawn_native { namespace d3d12 {
Texture* texture = ToBackend(copy->source.texture.Get());
Buffer* buffer = ToBackend(copy->destination.buffer.Get());
+ DAWN_TRY(buffer->EnsureDataInitializedAsDestination(commandContext, copy));
+
ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- // TODO(jiawei.shao@intel.com): use copy->destination.origin.z instead of
- // copy->destination.arrayLayer once GPUTextureCopyView.arrayLayer to
- // GPUTextureCopyView.origin.z is done.
- SubresourceRange subresources = {copy->source.mipLevel, 1,
- copy->source.arrayLayer, copy->copySize.depth};
+ SubresourceRange subresources =
+ GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
+
texture->EnsureSubresourceContentInitialized(commandContext, subresources);
texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
subresources);
buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
- const uint64_t bytesPerSlice =
- copy->destination.bytesPerRow * copy->destination.rowsPerImage;
-
- const dawn_native::Extent3D copyOneLayerSize = {copy->copySize.width,
- copy->copySize.height, 1};
- uint64_t bufferOffsetForNextSlice = 0;
- for (uint32_t copySlice = copy->source.arrayLayer;
- copySlice < copy->source.arrayLayer + copy->copySize.depth; ++copySlice) {
- // TODO(jiawei.shao@intel.com): compute copySplit once for all texture array
- // layers when possible.
- TextureCopySplit copySplit = ComputeTextureCopySplit(
- copy->source.origin, copyOneLayerSize, texture->GetFormat(),
- bufferOffsetForNextSlice + copy->destination.offset,
- copy->destination.bytesPerRow, copy->destination.rowsPerImage);
-
- D3D12_TEXTURE_COPY_LOCATION textureLocation =
- ComputeTextureCopyLocationForTexture(texture, copy->source.mipLevel,
- copySlice);
-
- for (uint32_t i = 0; i < copySplit.count; ++i) {
- const TextureCopySplit::CopyInfo& info = copySplit.copies[i];
-
- D3D12_TEXTURE_COPY_LOCATION bufferLocation =
- ComputeBufferLocationForCopyTextureRegion(
- texture, buffer->GetD3D12Resource().Get(), info.bufferSize,
- copySplit.offset, copy->destination.bytesPerRow);
-
- D3D12_BOX sourceRegion =
- ComputeD3D12BoxFromOffsetAndSize(info.textureOffset, info.copySize);
-
- commandList->CopyTextureRegion(&bufferLocation, info.bufferOffset.x,
- info.bufferOffset.y, info.bufferOffset.z,
- &textureLocation, &sourceRegion);
- }
+ const TexelBlockInfo& blockInfo =
+ texture->GetFormat().GetTexelBlockInfo(copy->source.aspect);
- bufferOffsetForNextSlice += bytesPerSlice;
+ // See comments around ComputeTextureCopySplits() for more details.
+ const TextureCopySplits copySplits = ComputeTextureCopySplits(
+ copy->source.origin, copy->copySize, blockInfo, copy->destination.offset,
+ copy->destination.bytesPerRow, copy->destination.rowsPerImage);
+
+ const uint64_t bytesPerSlice =
+ copy->destination.bytesPerRow *
+ (copy->destination.rowsPerImage / blockInfo.blockHeight);
+
+ // copySplits.copies2D[1] is always computed for the second copy slice, with an
+ // extra "bytesPerSlice" offset baked in relative to the first copy slice. We
+ // therefore use the array bufferOffsetsForNextSlice to record the extra offset
+ // for each split: bufferOffsetsForNextSlice[0] is the extra offset for the next
+ // copy slice that uses copySplits.copies2D[0], and bufferOffsetsForNextSlice[1]
+ // is the extra offset for the next copy slice that uses copySplits.copies2D[1].
+ // (A worked example follows this case block.)
+ std::array<uint64_t, TextureCopySplits::kMaxTextureCopySplits>
+ bufferOffsetsForNextSlice = {{0u, 0u}};
+ for (uint32_t copySlice = 0; copySlice < copy->copySize.depth; ++copySlice) {
+ const uint32_t splitIndex = copySlice % copySplits.copies2D.size();
+
+ const Texture2DCopySplit& copySplitPerLayerBase =
+ copySplits.copies2D[splitIndex];
+ const uint64_t bufferOffsetForNextSlice =
+ bufferOffsetsForNextSlice[splitIndex];
+ const uint32_t copyTextureLayer = copySlice + copy->source.origin.z;
+
+ RecordCopyTextureToBufferFromTextureCopySplit(
+ commandList, copySplitPerLayerBase, buffer, bufferOffsetForNextSlice,
+ copy->destination.bytesPerRow, texture, copy->source.mipLevel,
+ copyTextureLayer, subresources.aspects);
+
+ bufferOffsetsForNextSlice[splitIndex] +=
+ bytesPerSlice * copySplits.copies2D.size();
}
+
break;
}
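
A worked example of the bufferOffsetsForNextSlice bookkeeping referenced in the comment above, with illustrative values (bytesPerSlice = 1024, depth = 4, two 2D copy splits):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint64_t bytesPerSlice = 1024;  // illustrative
        const uint32_t depth = 4;             // copy->copySize.depth
        const size_t numSplits = 2;           // copySplits.copies2D.size()
        uint64_t extraOffsets[2] = {0, 0};
        for (uint32_t slice = 0; slice < depth; ++slice) {
            const size_t split = slice % numSplits;
            std::printf("slice %u -> split %zu, extra offset %llu\n", slice, split,
                        static_cast<unsigned long long>(extraOffsets[split]));
            extraOffsets[split] += bytesPerSlice * numSplits;
        }
        // Prints extra offsets 0, 0, 2048, 2048: each split's offset advances by
        // bytesPerSlice * numSplits every time that split is reused.
        return 0;
    }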
@@ -697,11 +750,11 @@ namespace dawn_native { namespace d3d12 {
Texture* source = ToBackend(copy->source.texture.Get());
Texture* destination = ToBackend(copy->destination.texture.Get());
- SubresourceRange srcRange = {copy->source.mipLevel, 1, copy->source.arrayLayer,
- copy->copySize.depth};
- SubresourceRange dstRange = {copy->destination.mipLevel, 1,
- copy->destination.arrayLayer,
- copy->copySize.depth};
+
+ SubresourceRange srcRange =
+ GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
+ SubresourceRange dstRange =
+ GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
source->EnsureSubresourceContentInitialized(commandContext, srcRange);
if (IsCompleteSubresourceCopiedTo(destination, copy->copySize,
@@ -717,8 +770,7 @@ namespace dawn_native { namespace d3d12 {
// subresources should all be COMMON instead of what we set now. Currently
// it is not allowed to copy with overlapped subresources, but we still
// add the ASSERT here as a reminder for this possible misuse.
- ASSERT(!IsRangeOverlapped(copy->source.arrayLayer,
- copy->destination.arrayLayer,
+ ASSERT(!IsRangeOverlapped(copy->source.origin.z, copy->destination.origin.z,
copy->copySize.depth));
}
source->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
@@ -726,6 +778,7 @@ namespace dawn_native { namespace d3d12 {
destination->TrackUsageAndTransitionNow(commandContext,
wgpu::TextureUsage::CopyDst, dstRange);
+ ASSERT(srcRange.aspects == dstRange.aspects);
if (CanUseCopyResource(source, destination, copy->copySize)) {
commandList->CopyResource(destination->GetD3D12Resource(),
source->GetD3D12Resource());
@@ -735,28 +788,56 @@ namespace dawn_native { namespace d3d12 {
destination->GetDimension() == wgpu::TextureDimension::e2D);
const dawn_native::Extent3D copyExtentOneSlice = {
copy->copySize.width, copy->copySize.height, 1u};
- for (uint32_t slice = 0; slice < copy->copySize.depth; ++slice) {
- D3D12_TEXTURE_COPY_LOCATION srcLocation =
- ComputeTextureCopyLocationForTexture(
- source, copy->source.mipLevel, copy->source.arrayLayer + slice);
-
- D3D12_TEXTURE_COPY_LOCATION dstLocation =
- ComputeTextureCopyLocationForTexture(
- destination, copy->destination.mipLevel,
- copy->destination.arrayLayer + slice);
-
- D3D12_BOX sourceRegion = ComputeD3D12BoxFromOffsetAndSize(
- copy->source.origin, copyExtentOneSlice);
-
- commandList->CopyTextureRegion(&dstLocation, copy->destination.origin.x,
- copy->destination.origin.y,
- copy->destination.origin.z, &srcLocation,
- &sourceRegion);
+
+ for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
+ for (uint32_t slice = 0; slice < copy->copySize.depth; ++slice) {
+ D3D12_TEXTURE_COPY_LOCATION srcLocation =
+ ComputeTextureCopyLocationForTexture(
+ source, copy->source.mipLevel,
+ copy->source.origin.z + slice, aspect);
+
+ D3D12_TEXTURE_COPY_LOCATION dstLocation =
+ ComputeTextureCopyLocationForTexture(
+ destination, copy->destination.mipLevel,
+ copy->destination.origin.z + slice, aspect);
+
+ Origin3D sourceOriginInSubresource = copy->source.origin;
+ sourceOriginInSubresource.z = 0;
+ D3D12_BOX sourceRegion = ComputeD3D12BoxFromOffsetAndSize(
+ sourceOriginInSubresource, copyExtentOneSlice);
+
+ commandList->CopyTextureRegion(
+ &dstLocation, copy->destination.origin.x,
+ copy->destination.origin.y, 0, &srcLocation, &sourceRegion);
+ }
}
}
break;
}
+ case Command::ResolveQuerySet: {
+ ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
+ QuerySet* querySet = ToBackend(cmd->querySet.Get());
+ Buffer* destination = ToBackend(cmd->destination.Get());
+
+ commandList->ResolveQueryData(
+ querySet->GetQueryHeap(), D3D12QueryType(querySet->GetQueryType()),
+ cmd->firstQuery, cmd->queryCount, destination->GetD3D12Resource(),
+ cmd->destinationOffset);
+
+ // TODO(hao.x.li@intel.com): Add compute shader to convert the query result
+ // (ticks) to timestamp (ns)
+
+ break;
+ }
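
The conversion the TODO above refers to is a scale by the queue's timestamp frequency (queried once via ID3D12CommandQueue::GetTimestampFrequency); a sketch, with overflow handling left out:

    #include <cstdint>

    // frequency stands in for the value GetTimestampFrequency() returned.
    uint64_t TicksToNanoseconds(uint64_t ticks, uint64_t frequency) {
        // Multiply first to preserve precision; a real implementation would
        // guard against overflow for large tick counts.
        return ticks * 1000000000ull / frequency;
    }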
+
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+ RecordWriteTimestampCmd(commandList, cmd);
+ break;
+ }
+
default: {
UNREACHABLE();
break;
@@ -790,8 +871,7 @@ namespace dawn_native { namespace d3d12 {
Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
ComPtr<ID3D12CommandSignature> signature =
ToBackend(GetDevice())->GetDispatchIndirectSignature();
- commandList->ExecuteIndirect(signature.Get(), 1,
- buffer->GetD3D12Resource().Get(),
+ commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
dispatch->indirectOffset, nullptr, 0);
break;
}
@@ -868,6 +948,13 @@ namespace dawn_native { namespace d3d12 {
break;
}
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+ RecordWriteTimestampCmd(commandList, cmd);
+ break;
+ }
+
default: {
UNREACHABLE();
break;
@@ -1106,8 +1193,7 @@ namespace dawn_native { namespace d3d12 {
Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
ComPtr<ID3D12CommandSignature> signature =
ToBackend(GetDevice())->GetDrawIndirectSignature();
- commandList->ExecuteIndirect(signature.Get(), 1,
- buffer->GetD3D12Resource().Get(),
+ commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
draw->indirectOffset, nullptr, 0);
break;
}
@@ -1121,8 +1207,7 @@ namespace dawn_native { namespace d3d12 {
Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
ComPtr<ID3D12CommandSignature> signature =
ToBackend(GetDevice())->GetDrawIndexedIndirectSignature();
- commandList->ExecuteIndirect(signature.Get(), 1,
- buffer->GetD3D12Resource().Get(),
+ commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
draw->indirectOffset, nullptr, 0);
break;
}
@@ -1286,6 +1371,13 @@ namespace dawn_native { namespace d3d12 {
break;
}
+ case Command::WriteTimestamp: {
+ WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+ RecordWriteTimestampCmd(commandList, cmd);
+ break;
+ }
+
default: {
DAWN_TRY(EncodeRenderBundleCommand(&mCommands, type));
break;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
index cc53fd54cf9..f4d858c8585 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/CommandBufferD3D12.h
@@ -16,7 +16,6 @@
#define DAWNNATIVE_D3D12_COMMANDBUFFERD3D12_H_
#include "common/Constants.h"
-#include "dawn_native/CommandAllocator.h"
#include "dawn_native/CommandBuffer.h"
#include "dawn_native/Error.h"
#include "dawn_native/d3d12/Forward.h"
@@ -43,7 +42,6 @@ namespace dawn_native { namespace d3d12 {
MaybeError RecordCommands(CommandRecordingContext* commandContext);
private:
- ~CommandBuffer() override;
MaybeError RecordComputePass(CommandRecordingContext* commandContext,
BindGroupStateTracker* bindingTracker);
MaybeError RecordRenderPass(CommandRecordingContext* commandContext,
@@ -55,8 +53,6 @@ namespace dawn_native { namespace d3d12 {
RenderPassBuilder* renderPassBuilder);
void EmulateBeginRenderPass(CommandRecordingContext* commandContext,
const RenderPassBuilder* renderPassBuilder) const;
-
- CommandIterator mCommands;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
index f78975ef7a9..71965994265 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Backend.cpp
@@ -69,4 +69,7 @@ namespace dawn_native { namespace d3d12 {
return reinterpret_cast<WGPUTexture>(texture.Detach());
}
+ AdapterDiscoveryOptions::AdapterDiscoveryOptions(ComPtr<IDXGIAdapter> adapter)
+ : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(std::move(adapter)) {
+ }
}} // namespace dawn_native::d3d12
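
A hedged usage sketch of the new constructor: restricting adapter discovery to one specific IDXGIAdapter rather than enumerating everything (the caller is assumed to own both the Instance and the adapter):

    #include <utility>
    #include <dawn_native/D3D12Backend.h>
    #include <dawn_native/DawnNative.h>

    void DiscoverSpecificAdapter(dawn_native::Instance* instance,
                                 Microsoft::WRL::ComPtr<IDXGIAdapter> dxgiAdapter) {
        dawn_native::d3d12::AdapterDiscoveryOptions options(std::move(dxgiAdapter));
        instance->DiscoverAdapters(&options);
    }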
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h
index 16ea4c4e834..ade06fe62e6 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Error.h
@@ -42,4 +42,4 @@ namespace dawn_native { namespace d3d12 {
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_D3D12ERROR_H_ \ No newline at end of file
+#endif // DAWNNATIVE_D3D12_D3D12ERROR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp
index 6505e44aef5..e19dd8179be 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.cpp
@@ -50,15 +50,59 @@ namespace dawn_native { namespace d3d12 {
D3D12_FEATURE_DATA_D3D12_OPTIONS5 featureOptions5 = {};
if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
D3D12_FEATURE_D3D12_OPTIONS5, &featureOptions5, sizeof(featureOptions5)))) {
- // Performance regressions been observed when using a render pass on Intel graphics with
- // RENDER_PASS_TIER_1 available, so fall back to a software emulated render pass on
- // these platforms.
+ // Performance regressions have been observed when using a render pass on Intel
+ // graphics with RENDER_PASS_TIER_1 available, so fall back to a software emulated
+ // render pass on these platforms.
if (featureOptions5.RenderPassesTier < D3D12_RENDER_PASS_TIER_1 ||
!gpu_info::IsIntel(adapter.GetPCIInfo().vendorId)) {
info.supportsRenderPass = true;
}
}
- return info;
+ D3D12_FEATURE_DATA_SHADER_MODEL knownShaderModels[] = {{D3D_SHADER_MODEL_6_2},
+ {D3D_SHADER_MODEL_6_1},
+ {D3D_SHADER_MODEL_6_0},
+ {D3D_SHADER_MODEL_5_1}};
+ uint32_t driverShaderModel = 0;
+ for (D3D12_FEATURE_DATA_SHADER_MODEL shaderModel : knownShaderModels) {
+ if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+ D3D12_FEATURE_SHADER_MODEL, &shaderModel, sizeof(shaderModel)))) {
+ driverShaderModel = shaderModel.HighestShaderModel;
+ break;
+ }
+ }
+
+ if (driverShaderModel < D3D_SHADER_MODEL_5_1) {
+ return DAWN_INTERNAL_ERROR("Driver doesn't support Shader Model 5.1 or higher");
+ }
+
+ // D3D_SHADER_MODEL is encoded as 0xMm with M the major version and m the minor version
+ ASSERT(driverShaderModel <= 0xFF);
+ uint32_t shaderModelMajor = (driverShaderModel & 0xF0) >> 4;
+ uint32_t shaderModelMinor = (driverShaderModel & 0xF);
+
+ ASSERT(shaderModelMajor < 10);
+ ASSERT(shaderModelMinor < 10);
+ info.shaderModel = 10 * shaderModelMajor + shaderModelMinor;
+
+ // Profiles are always <stage>s_<major>_<minor>, so we build the "s_<major>_<minor>"
+ // suffix once and prepend each stage's letter to it.
+ std::wstring profileSuffix = L"s_M_n";
+ profileSuffix[2] = wchar_t('0' + shaderModelMajor);
+ profileSuffix[4] = wchar_t('0' + shaderModelMinor);
+
+ info.shaderProfiles[SingleShaderStage::Vertex] = L"v" + profileSuffix;
+ info.shaderProfiles[SingleShaderStage::Fragment] = L"p" + profileSuffix;
+ info.shaderProfiles[SingleShaderStage::Compute] = L"c" + profileSuffix;
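
A worked example of the decoding and profile construction above: D3D_SHADER_MODEL_6_2 is encoded as 0x62, giving major 6, minor 2, shaderModel 62, and a vertex profile of "vs_6_2":

    #include <cassert>
    #include <cstdint>
    #include <string>

    int main() {
        const uint32_t driverShaderModel = 0x62;                 // D3D_SHADER_MODEL_6_2
        const uint32_t major = (driverShaderModel & 0xF0) >> 4;  // 6
        const uint32_t minor = driverShaderModel & 0xF;          // 2
        assert(10 * major + minor == 62);

        std::wstring suffix = L"s_M_n";
        suffix[2] = wchar_t('0' + major);
        suffix[4] = wchar_t('0' + minor);
        assert(L"v" + suffix == L"vs_6_2");
        return 0;
    }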
+
+ D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureData4 = {};
+ if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+ D3D12_FEATURE_D3D12_OPTIONS4, &featureData4, sizeof(featureData4)))) {
+ // info.shaderModel is stored in decimal (62 means SM 6.2), so compare against
+ // the decimal value 62 rather than the 0x62 encoding of D3D_SHADER_MODEL_6_2.
+ info.supportsShaderFloat16 = info.shaderModel >= 62 &&
+ featureData4.Native16BitShaderOpsSupported;
+ }
+
+ return std::move(info);
}
+
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.h b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.h
index 78d38208169..46b2d09eba2 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/D3D12Info.h
@@ -16,6 +16,7 @@
#define DAWNNATIVE_D3D12_D3D12INFO_H_
#include "dawn_native/Error.h"
+#include "dawn_native/PerStage.h"
#include "dawn_native/d3d12/d3d12_platform.h"
namespace dawn_native { namespace d3d12 {
@@ -26,9 +27,14 @@ namespace dawn_native { namespace d3d12 {
bool isUMA;
uint32_t resourceHeapTier;
bool supportsRenderPass;
+ bool supportsShaderFloat16;
+ // shaderModel indicates the maximum supported shader model in decimal form; for
+ // example, the value 62 means the driver supports at most shader model 6.2.
+ uint32_t shaderModel;
+ PerStage<std::wstring> shaderProfiles;
};
ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter);
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_D3D12INFO_H_ \ No newline at end of file
+#endif // DAWNNATIVE_D3D12_D3D12INFO_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
index 9f180450c5c..145a3829c84 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.cpp
@@ -17,6 +17,7 @@
#include "common/Assert.h"
#include "dawn_native/BackendConnection.h"
#include "dawn_native/ErrorData.h"
+#include "dawn_native/Format.h"
#include "dawn_native/Instance.h"
#include "dawn_native/d3d12/AdapterD3D12.h"
#include "dawn_native/d3d12/BackendD3D12.h"
@@ -29,6 +30,7 @@
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/PipelineLayoutD3D12.h"
#include "dawn_native/d3d12/PlatformFunctions.h"
+#include "dawn_native/d3d12/QuerySetD3D12.h"
#include "dawn_native/d3d12/QueueD3D12.h"
#include "dawn_native/d3d12/RenderPipelineD3D12.h"
#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
@@ -41,6 +43,7 @@
#include "dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h"
#include "dawn_native/d3d12/SwapChainD3D12.h"
#include "dawn_native/d3d12/TextureD3D12.h"
+#include "dawn_native/d3d12/UtilsD3D12.h"
#include <sstream>
@@ -90,14 +93,15 @@ namespace dawn_native { namespace d3d12 {
mCommandAllocatorManager = std::make_unique<CommandAllocatorManager>(this);
// Zero sized allocator is never requested and does not need to exist.
- for (uint32_t countIndex = 1; countIndex < kNumOfStagingDescriptorAllocators;
- countIndex++) {
- mViewAllocators[countIndex] = std::make_unique<StagingDescriptorAllocator>(
- this, countIndex, kShaderVisibleDescriptorHeapSize,
+ for (uint32_t countIndex = 0; countIndex < kNumViewDescriptorAllocators; countIndex++) {
+ mViewAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
+ this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
+ }
- mSamplerAllocators[countIndex] = std::make_unique<StagingDescriptorAllocator>(
- this, countIndex, kShaderVisibleDescriptorHeapSize,
+ for (uint32_t countIndex = 0; countIndex < kNumSamplerDescriptorAllocators; countIndex++) {
+ mSamplerAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
+ this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
}
@@ -270,10 +274,10 @@ namespace dawn_native { namespace d3d12 {
const BindGroupLayoutDescriptor* descriptor) {
return new BindGroupLayout(this, descriptor);
}
- ResultOrError<BufferBase*> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+ ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
Ref<Buffer> buffer = AcquireRef(new Buffer(this, descriptor));
DAWN_TRY(buffer->Initialize());
- return buffer.Detach();
+ return std::move(buffer);
}
CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) {
@@ -288,7 +292,7 @@ namespace dawn_native { namespace d3d12 {
return PipelineLayout::Create(this, descriptor);
}
ResultOrError<QuerySetBase*> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
- return DAWN_UNIMPLEMENTED_ERROR("Waiting for implementation");
+ return QuerySet::Create(this, descriptor);
}
ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
@@ -336,12 +340,55 @@ namespace dawn_native { namespace d3d12 {
DAWN_TRY_ASSIGN(commandRecordingContext, GetPendingCommandContext());
Buffer* dstBuffer = ToBackend(destination);
+
+ DAWN_TRY(dstBuffer->EnsureDataInitializedAsDestination(commandRecordingContext,
+ destinationOffset, size));
+
+ CopyFromStagingToBufferImpl(commandRecordingContext, source, sourceOffset, destination,
+ destinationOffset, size);
+
+ return {};
+ }
+
+ void Device::CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
+ StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size) {
+ ASSERT(commandContext != nullptr);
+ Buffer* dstBuffer = ToBackend(destination);
StagingBuffer* srcBuffer = ToBackend(source);
- dstBuffer->TrackUsageAndTransitionNow(commandRecordingContext, wgpu::BufferUsage::CopyDst);
+ dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
- commandRecordingContext->GetCommandList()->CopyBufferRegion(
- dstBuffer->GetD3D12Resource().Get(), destinationOffset, srcBuffer->GetResource(),
+ commandContext->GetCommandList()->CopyBufferRegion(
+ dstBuffer->GetD3D12Resource(), destinationOffset, srcBuffer->GetResource(),
sourceOffset, size);
+ }
+
+ MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) {
+ CommandRecordingContext* commandContext;
+ DAWN_TRY_ASSIGN(commandContext, GetPendingCommandContext());
+ Texture* texture = ToBackend(dst->texture.Get());
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+
+ SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
+
+ if (IsCompleteSubresourceCopiedTo(texture, copySizePixels, dst->mipLevel)) {
+ texture->SetIsSubresourceContentInitialized(true, range);
+ } else {
+ texture->EnsureSubresourceContentInitialized(commandContext, range);
+ }
+
+ texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst, range);
+
+ // Compute the copy splits and record the CopyTextureRegion commands.
+ CopyBufferToTextureWithCopySplit(commandContext, *dst, copySizePixels, texture,
+ ToBackend(source)->GetResource(), src.offset,
+ src.bytesPerRow, src.rowsPerImage, range.aspects);
return {};
}
@@ -523,8 +570,11 @@ namespace dawn_native { namespace d3d12 {
::CloseHandle(mFenceEvent);
}
+ // Release recycled resource heaps.
+ mResourceAllocatorManager->DestroyPool();
+
// We need to handle clearing up COM object refs that were enqueued after TickImpl
- mUsedComObjectRefs.ClearUpTo(GetCompletedCommandSerial());
+ mUsedComObjectRefs.ClearUpTo(std::numeric_limits<Serial>::max());
ASSERT(mUsedComObjectRefs.Empty());
ASSERT(!mPendingCommands.IsOpen());
@@ -540,14 +590,18 @@ namespace dawn_native { namespace d3d12 {
StagingDescriptorAllocator* Device::GetViewStagingDescriptorAllocator(
uint32_t descriptorCount) const {
- ASSERT(descriptorCount < kNumOfStagingDescriptorAllocators);
- return mViewAllocators[descriptorCount].get();
+ ASSERT(descriptorCount <= kMaxViewDescriptorsPerBindGroup);
+ // This is Log2 of the next power of two, plus 1.
+ uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
+ return mViewAllocators[allocatorIndex].get();
}
StagingDescriptorAllocator* Device::GetSamplerStagingDescriptorAllocator(
uint32_t descriptorCount) const {
- ASSERT(descriptorCount < kNumOfStagingDescriptorAllocators);
- return mSamplerAllocators[descriptorCount].get();
+ ASSERT(descriptorCount <= kMaxSamplerDescriptorsPerBindGroup);
+ // This is Log2 of the next power of two, plus 1.
+ uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
+ return mSamplerAllocators[allocatorIndex].get();
}
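
The bucketing in both getters above maps a descriptor count to the allocator whose block size is the next power of two; a small self-check (Log2CeilSketch stands in for Dawn's Log2Ceil helper):

    #include <cassert>
    #include <cstdint>

    uint32_t Log2CeilSketch(uint32_t v) {
        uint32_t log = 0;
        while ((1u << log) < v) {
            ++log;
        }
        return log;
    }

    int main() {
        // count -> index: 1 -> 1 (block size 1), 3 and 4 -> 3 (block size 4),
        // 5 -> 4 (block size 8); index 0 stays the unused zero-count slot.
        assert(Log2CeilSketch(1) + 1 == 1);
        assert(Log2CeilSketch(3) + 1 == 3);
        assert(Log2CeilSketch(4) + 1 == 3);
        assert(Log2CeilSketch(5) + 1 == 4);
        return 0;
    }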
StagingDescriptorAllocator* Device::GetRenderTargetViewAllocator() const {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
index 1ee40927dfb..3ab03934add 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/DeviceD3D12.h
@@ -19,6 +19,8 @@
#include "common/Constants.h"
#include "common/SerialQueue.h"
+#include "dawn_native/BindingInfo.h"
+#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
#include "dawn_native/d3d12/CommandRecordingContext.h"
#include "dawn_native/d3d12/D3D12Info.h"
@@ -91,6 +93,18 @@ namespace dawn_native { namespace d3d12 {
uint64_t destinationOffset,
uint64_t size) override;
+ void CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
+ StagingBufferBase* source,
+ uint64_t sourceOffset,
+ BufferBase* destination,
+ uint64_t destinationOffset,
+ uint64_t size);
+
+ MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels);
+
ResultOrError<ResourceHeapAllocation> AllocateMemory(
D3D12_HEAP_TYPE heapType,
const D3D12_RESOURCE_DESC& resourceDescriptor,
@@ -131,7 +145,8 @@ namespace dawn_native { namespace d3d12 {
const BindGroupDescriptor* descriptor) override;
ResultOrError<BindGroupLayoutBase*> CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) override;
- ResultOrError<BufferBase*> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+ ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+ const BufferDescriptor* descriptor) override;
ResultOrError<ComputePipelineBase*> CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) override;
ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
@@ -184,13 +199,24 @@ namespace dawn_native { namespace d3d12 {
std::unique_ptr<ResourceAllocatorManager> mResourceAllocatorManager;
std::unique_ptr<ResidencyManager> mResidencyManager;
- // Index corresponds to the descriptor count in the range [0, kMaxBindingsPerGroup].
- static constexpr uint32_t kNumOfStagingDescriptorAllocators = kMaxBindingsPerGroup + 1;
+ static constexpr uint32_t kMaxSamplerDescriptorsPerBindGroup =
+ 3 * kMaxSamplersPerShaderStage;
+ static constexpr uint32_t kMaxViewDescriptorsPerBindGroup =
+ kMaxBindingsPerPipelineLayout - kMaxSamplerDescriptorsPerBindGroup;
+
+ static constexpr uint32_t kNumSamplerDescriptorAllocators =
+ ConstexprLog2Ceil(kMaxSamplerDescriptorsPerBindGroup) + 1;
+ static constexpr uint32_t kNumViewDescriptorAllocators =
+ ConstexprLog2Ceil(kMaxViewDescriptorsPerBindGroup) + 1;
- std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumOfStagingDescriptorAllocators>
+ // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
+ // the range [0, kMaxViewDescriptorsPerBindGroup].
+ std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumViewDescriptorAllocators + 1>
mViewAllocators;
- std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumOfStagingDescriptorAllocators>
+ // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
+ // the range [0, kMaxSamplerDescriptorsPerBindGroup].
+ std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumSamplerDescriptorAllocators + 1>
mSamplerAllocators;
std::unique_ptr<StagingDescriptorAllocator> mRenderTargetViewAllocator;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
index c72605a58b6..3d3ba20fad3 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
@@ -36,4 +36,4 @@ namespace dawn_native { namespace d3d12 {
Serial GPUDescriptorHeapAllocation::GetHeapSerial() const {
return mHeapSerial;
}
-}} // namespace dawn_native::d3d12 \ No newline at end of file
+}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h
index c18d2662c3a..08930d79fcf 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/GPUDescriptorHeapAllocationD3D12.h
@@ -40,4 +40,4 @@ namespace dawn_native { namespace d3d12 {
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_ \ No newline at end of file
+#endif // DAWNNATIVE_D3D12_CPUDESCRIPTORHEAPALLOCATION_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp
index ade5d4a5c1c..7bb4e323a67 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.cpp
@@ -28,4 +28,4 @@ namespace dawn_native { namespace d3d12 {
return mD3d12Heap.Get();
}
-}} // namespace dawn_native::d3d12 \ No newline at end of file
+}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h
index 715ffcdaf10..b59c6449dec 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/HeapD3D12.h
@@ -37,4 +37,4 @@ namespace dawn_native { namespace d3d12 {
};
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_HEAPD3D12_H_ \ No newline at end of file
+#endif // DAWNNATIVE_D3D12_HEAPD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.cpp
index 58847808644..5c5ef9c0f0d 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PageableD3D12.cpp
@@ -73,4 +73,4 @@ namespace dawn_native { namespace d3d12 {
bool Pageable::IsResidencyLocked() const {
return mResidencyLockRefCount != 0;
}
-}} // namespace dawn_native::d3d12 \ No newline at end of file
+}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
index 5e3460bb0f5..8eed07ed2c9 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.cpp
@@ -194,7 +194,7 @@ namespace dawn_native { namespace d3d12 {
uint32_t PipelineLayout::GetDynamicRootParameterIndex(BindGroupIndex group,
BindingIndex bindingIndex) const {
ASSERT(group < kMaxBindGroupsTyped);
- ASSERT(bindingIndex < kMaxBindingsPerGroupTyped);
+ ASSERT(bindingIndex < kMaxDynamicBuffersPerPipelineLayoutTyped);
ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).hasDynamicOffset);
ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).visibility !=
wgpu::ShaderStage::None);
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
index 8de41b06773..6116cd81988 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/PipelineLayoutD3D12.h
@@ -45,7 +45,7 @@ namespace dawn_native { namespace d3d12 {
ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mCbvUavSrvRootParameterInfo;
ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mSamplerRootParameterInfo;
ityp::array<BindGroupIndex,
- ityp::array<BindingIndex, uint32_t, kMaxBindingsPerGroup>,
+ ityp::array<BindingIndex, uint32_t, kMaxDynamicBuffersPerPipelineLayout>,
kMaxBindGroups>
mDynamicRootParameterIndices;
ComPtr<ID3D12RootSignature> mRootSignature;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.cpp
new file mode 100644
index 00000000000..2bea5263167
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.cpp
@@ -0,0 +1,68 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/d3d12/QuerySetD3D12.h"
+
+#include "dawn_native/d3d12/D3D12Error.h"
+#include "dawn_native/d3d12/DeviceD3D12.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ namespace {
+ D3D12_QUERY_HEAP_TYPE D3D12QueryHeapType(wgpu::QueryType type) {
+ switch (type) {
+ case wgpu::QueryType::Occlusion:
+ return D3D12_QUERY_HEAP_TYPE_OCCLUSION;
+ case wgpu::QueryType::PipelineStatistics:
+ return D3D12_QUERY_HEAP_TYPE_PIPELINE_STATISTICS;
+ case wgpu::QueryType::Timestamp:
+ return D3D12_QUERY_HEAP_TYPE_TIMESTAMP;
+ default:
+ UNREACHABLE();
+ }
+ }
+ } // anonymous namespace
+
+ // static
+ ResultOrError<QuerySet*> QuerySet::Create(Device* device,
+ const QuerySetDescriptor* descriptor) {
+ Ref<QuerySet> querySet = AcquireRef(new QuerySet(device, descriptor));
+ DAWN_TRY(querySet->Initialize());
+ return querySet.Detach();
+ }
+
+ MaybeError QuerySet::Initialize() {
+ D3D12_QUERY_HEAP_DESC queryHeapDesc = {};
+ queryHeapDesc.Type = D3D12QueryHeapType(GetQueryType());
+ queryHeapDesc.Count = GetQueryCount();
+
+ ID3D12Device* d3d12Device = ToBackend(GetDevice())->GetD3D12Device();
+ return CheckOutOfMemoryHRESULT(
+ d3d12Device->CreateQueryHeap(&queryHeapDesc, IID_PPV_ARGS(&mQueryHeap)),
+ "ID3D12Device::CreateQueryHeap");
+ }
+
+ ID3D12QueryHeap* QuerySet::GetQueryHeap() const {
+ return mQueryHeap.Get();
+ }
+
+ QuerySet::~QuerySet() {
+ DestroyInternal();
+ }
+
+ void QuerySet::DestroyImpl() {
+ ToBackend(GetDevice())->ReferenceUntilUnused(mQueryHeap);
+ }
+
+}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.h
new file mode 100644
index 00000000000..7b24cceb67d
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/QuerySetD3D12.h
@@ -0,0 +1,45 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_D3D12_QUERYSETD3D12_H_
+#define DAWNNATIVE_D3D12_QUERYSETD3D12_H_
+
+#include "dawn_native/QuerySet.h"
+#include "dawn_native/d3d12/d3d12_platform.h"
+
+namespace dawn_native { namespace d3d12 {
+
+ class Device;
+
+ class QuerySet : public QuerySetBase {
+ public:
+ static ResultOrError<QuerySet*> Create(Device* device,
+ const QuerySetDescriptor* descriptor);
+
+ ID3D12QueryHeap* GetQueryHeap() const;
+
+ private:
+ ~QuerySet() override;
+ using QuerySetBase::QuerySetBase;
+ MaybeError Initialize();
+
+ // Dawn API
+ void DestroyImpl() override;
+
+ ComPtr<ID3D12QueryHeap> mQueryHeap;
+ };
+
+}} // namespace dawn_native::d3d12
+
+#endif // DAWNNATIVE_D3D12_QUERYSETD3D12_H_
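For context, a hedged sketch of how the new backend object might be used from inside the D3D12 backend. The descriptor field names here are assumptions inferred from the GetQueryType()/GetQueryCount() accessors consumed by Initialize(); the snippet is assumed to run inside a function returning a Dawn error type so DAWN_TRY_ASSIGN applies:

    // Hypothetical call site; `device` is a dawn_native::d3d12::Device*.
    QuerySetDescriptor desc = {};
    desc.type = wgpu::QueryType::Timestamp;  // assumed field name
    desc.count = 64;                         // assumed field name

    QuerySet* querySet = nullptr;
    DAWN_TRY_ASSIGN(querySet, QuerySet::Create(device, &desc));
    ID3D12QueryHeap* heap = querySet->GetQueryHeap();  // backing D3D12 query heap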
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
index 710e41f052c..b17919eec4b 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.cpp
@@ -14,6 +14,10 @@
#include "dawn_native/d3d12/QueueD3D12.h"
+#include "common/Math.h"
+#include "dawn_native/CommandValidation.h"
+#include "dawn_native/Commands.h"
+#include "dawn_native/DynamicUploader.h"
#include "dawn_native/d3d12/CommandBufferD3D12.h"
#include "dawn_native/d3d12/D3D12Error.h"
#include "dawn_native/d3d12/DeviceD3D12.h"
@@ -22,6 +26,50 @@
namespace dawn_native { namespace d3d12 {
+ namespace {
+ ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRow(
+ DeviceBase* device,
+ const void* data,
+ uint32_t alignedBytesPerRow,
+ uint32_t optimallyAlignedBytesPerRow,
+ uint32_t alignedRowsPerImage,
+ const TextureDataLayout& dataLayout,
+ const Format& textureFormat,
+ const Extent3D& writeSizePixel) {
+ uint64_t newDataSizeBytes;
+ DAWN_TRY_ASSIGN(
+ newDataSizeBytes,
+ ComputeRequiredBytesInCopy(textureFormat, writeSizePixel,
+ optimallyAlignedBytesPerRow, alignedRowsPerImage));
+
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+ newDataSizeBytes, device->GetPendingCommandSerial(),
+ textureFormat.blockByteSize));
+ ASSERT(uploadHandle.mappedBuffer != nullptr);
+
+ uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
+ const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
+ srcPointer += dataLayout.offset;
+
+ uint32_t alignedRowsPerImageInBlock = alignedRowsPerImage / textureFormat.blockHeight;
+ uint32_t dataRowsPerImageInBlock = dataLayout.rowsPerImage / textureFormat.blockHeight;
+ if (dataRowsPerImageInBlock == 0) {
+ dataRowsPerImageInBlock = writeSizePixel.height / textureFormat.blockHeight;
+ }
+
+ ASSERT(dataRowsPerImageInBlock >= alignedRowsPerImageInBlock);
+ uint64_t imageAdditionalStride =
+ dataLayout.bytesPerRow * (dataRowsPerImageInBlock - alignedRowsPerImageInBlock);
+
+ CopyTextureData(dstPointer, srcPointer, writeSizePixel.depth,
+ alignedRowsPerImageInBlock, imageAdditionalStride, alignedBytesPerRow,
+ optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);
+
+ return uploadHandle;
+ }
+ } // namespace
+
Queue::Queue(Device* device) : QueueBase(device) {
}
@@ -47,4 +95,43 @@ namespace dawn_native { namespace d3d12 {
return {};
}
+ MaybeError Queue::WriteTextureImpl(const TextureCopyView& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& writeSizePixel) {
+ const TexelBlockInfo& blockInfo =
+ destination.texture->GetFormat().GetTexelBlockInfo(destination.aspect);
+
+ // We are only copying the part of the data that will appear in the texture.
+ // Note that validating the texture copy range ensures that writeSizePixel.width and
+ // writeSizePixel.height are multiples of blockWidth and blockHeight respectively.
+ uint32_t alignedBytesPerRow =
+ (writeSizePixel.width) / blockInfo.blockWidth * blockInfo.blockByteSize;
+ uint32_t alignedRowsPerImage = writeSizePixel.height;
+ uint32_t optimallyAlignedBytesPerRow =
+ Align(alignedBytesPerRow, D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
+
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(
+ uploadHandle,
+ UploadTextureDataAligningBytesPerRow(
+ GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
+ alignedRowsPerImage, dataLayout, destination.texture->GetFormat(), writeSizePixel));
+
+ TextureDataLayout passDataLayout = dataLayout;
+ passDataLayout.offset = uploadHandle.startOffset;
+ passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
+ passDataLayout.rowsPerImage = alignedRowsPerImage;
+
+ TextureCopy textureCopy;
+ textureCopy.texture = destination.texture;
+ textureCopy.mipLevel = destination.mipLevel;
+ textureCopy.origin = destination.origin;
+ textureCopy.aspect = ConvertAspect(destination.texture->GetFormat(), destination.aspect);
+
+ return ToBackend(GetDevice())
+ ->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout, &textureCopy,
+ writeSizePixel);
+ }
+
}} // namespace dawn_native::d3d12
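The repacking in WriteTextureImpl above exists because D3D12 requires each row of a buffer-to-texture copy to start at a D3D12_TEXTURE_DATA_PITCH_ALIGNMENT (256-byte) boundary. A small self-contained sketch of that arithmetic, assuming an RGBA8 format (4-byte, 1x1 texel blocks); AlignUp mirrors the Align() helper from common/Math.h used in the patch:

    #include <cassert>
    #include <cstdint>

    // Round `value` up to the next multiple of `alignment` (a power of two).
    constexpr uint64_t AlignUp(uint64_t value, uint64_t alignment) {
        return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
        constexpr uint32_t kPitchAlignment = 256;  // D3D12_TEXTURE_DATA_PITCH_ALIGNMENT
        constexpr uint32_t kBytesPerTexel = 4;     // RGBA8

        // A 100-texel-wide copy needs 400 bytes per row, which must be padded
        // to 512 bytes per row in the staging buffer.
        uint32_t alignedBytesPerRow = 100 * kBytesPerTexel;            // 400
        uint32_t optimallyAlignedBytesPerRow =
            AlignUp(alignedBytesPerRow, kPitchAlignment);              // 512
        assert(optimallyAlignedBytesPerRow == 512);

        // A 64-texel-wide copy is already aligned and needs no padding.
        assert(AlignUp(64 * kBytesPerTexel, kPitchAlignment) == 256);
        return 0;
    }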
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h
index f211d0bf587..6b23a4569eb 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/QueueD3D12.h
@@ -31,6 +31,10 @@ namespace dawn_native { namespace d3d12 {
private:
MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError WriteTextureImpl(const TextureCopyView& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& writeSizePixel) override;
};
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp
index 19657849a19..587f48a9aec 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.cpp
@@ -86,12 +86,14 @@ namespace dawn_native { namespace d3d12 {
D3D12EndingAccessResolveSubresourceParameters(TextureView* resolveDestination) {
D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS subresourceParameters;
Texture* resolveDestinationTexture = ToBackend(resolveDestination->GetTexture());
+ ASSERT(resolveDestinationTexture->GetFormat().aspects == Aspect::Color);
subresourceParameters.DstX = 0;
subresourceParameters.DstY = 0;
subresourceParameters.SrcSubresource = 0;
subresourceParameters.DstSubresource = resolveDestinationTexture->GetSubresourceIndex(
- resolveDestination->GetBaseMipLevel(), resolveDestination->GetBaseArrayLayer());
+ resolveDestination->GetBaseMipLevel(), resolveDestination->GetBaseArrayLayer(),
+ Aspect::Color);
// Resolving a specified sub-rect is only valid on hardware that supports sample
// positions. This means even {0, 0, width, height} would be invalid if unsupported. To
// avoid this, we assume sub-rect resolves never work by setting them to all zeros or
@@ -234,4 +236,4 @@ namespace dawn_native { namespace d3d12 {
D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
}
-}} // namespace dawn_native::d3d12 \ No newline at end of file
+}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.h
index aadb2e8226d..cf61a182a09 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPassBuilderD3D12.h
@@ -88,4 +88,4 @@ namespace dawn_native { namespace d3d12 {
};
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_RENDERPASSBUILDERD3D12_H_ \ No newline at end of file
+#endif // DAWNNATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
index f629e76f261..165315469fe 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/RenderPipelineD3D12.cpp
@@ -327,7 +327,6 @@ namespace dawn_native { namespace d3d12 {
wgpu::ShaderStage renderStages = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment;
for (auto stage : IterateStages(renderStages)) {
-
std::string hlslSource;
DAWN_TRY_ASSIGN(hlslSource, modules[stage]->GetHLSLSource(ToBackend(GetLayout())));
@@ -384,13 +383,13 @@ namespace dawn_native { namespace d3d12 {
}
descriptorD3D12.NumRenderTargets = static_cast<uint32_t>(GetColorAttachmentsMask().count());
- descriptorD3D12.BlendState.AlphaToCoverageEnable = FALSE;
+ descriptorD3D12.BlendState.AlphaToCoverageEnable = descriptor->alphaToCoverageEnabled;
descriptorD3D12.BlendState.IndependentBlendEnable = TRUE;
descriptorD3D12.DepthStencilState =
ComputeDepthStencilDesc(GetDepthStencilStateDescriptor());
- descriptorD3D12.SampleMask = UINT_MAX;
+ descriptorD3D12.SampleMask = GetSampleMask();
descriptorD3D12.PrimitiveTopologyType = D3D12PrimitiveTopologyType(GetPrimitiveTopology());
descriptorD3D12.SampleDesc.Count = GetSampleCount();
descriptorD3D12.SampleDesc.Quality = 0;
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp
index 053a1690fdb..e66296e4e26 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.cpp
@@ -367,4 +367,4 @@ namespace dawn_native { namespace d3d12 {
}
}
-}} // namespace dawn_native::d3d12 \ No newline at end of file
+}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h
index 304a211b084..a79a4fca6a2 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResidencyManagerD3D12.h
@@ -80,4 +80,4 @@ namespace dawn_native { namespace d3d12 {
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_RESIDENCYMANAGERD3D12_H_ \ No newline at end of file
+#endif // DAWNNATIVE_D3D12_RESIDENCYMANAGERD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
index 4e48f2edafc..8090b7db468 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.cpp
@@ -19,6 +19,7 @@
#include "dawn_native/d3d12/HeapAllocatorD3D12.h"
#include "dawn_native/d3d12/HeapD3D12.h"
#include "dawn_native/d3d12/ResidencyManagerD3D12.h"
+#include "dawn_native/d3d12/UtilsD3D12.h"
namespace dawn_native { namespace d3d12 {
namespace {
@@ -148,6 +149,17 @@ namespace dawn_native { namespace d3d12 {
}
}
+ bool IsClearValueOptimizable(const D3D12_RESOURCE_DESC& resourceDescriptor) {
+ // Optimized clear color cannot be set on buffers, non-render-target/depth-stencil
+ // textures, or typeless resources
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createcommittedresource
+ // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
+ return !IsTypeless(resourceDescriptor.Format) &&
+ resourceDescriptor.Dimension != D3D12_RESOURCE_DIMENSION_BUFFER &&
+ (resourceDescriptor.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET |
+ D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) != 0;
+ }
+
} // namespace
ResourceAllocatorManager::ResourceAllocatorManager(Device* device) : mDevice(device) {
@@ -160,8 +172,10 @@ namespace dawn_native { namespace d3d12 {
mHeapAllocators[i] = std::make_unique<HeapAllocator>(
mDevice, GetD3D12HeapType(resourceHeapKind), GetD3D12HeapFlags(resourceHeapKind),
GetMemorySegment(device, GetD3D12HeapType(resourceHeapKind)));
+ mPooledHeapAllocators[i] =
+ std::make_unique<PooledResourceMemoryAllocator>(mHeapAllocators[i].get());
mSubAllocatedResourceAllocators[i] = std::make_unique<BuddyMemoryAllocator>(
- kMaxHeapSize, kMinHeapSize, mHeapAllocators[i].get());
+ kMaxHeapSize, kMinHeapSize, mPooledHeapAllocators[i].get());
}
}
@@ -169,14 +183,25 @@ namespace dawn_native { namespace d3d12 {
D3D12_HEAP_TYPE heapType,
const D3D12_RESOURCE_DESC& resourceDescriptor,
D3D12_RESOURCE_STATES initialUsage) {
+ // In order to suppress a warning in the D3D12 debug layer, we need to specify an
+ // optimized clear value. As there are no negative consequences when picking a mismatched
+ // clear value, we use zero as the optimized clear value. This also enables fast clears on
+ // some architectures.
+ D3D12_CLEAR_VALUE zero{};
+ D3D12_CLEAR_VALUE* optimizedClearValue = nullptr;
+ if (IsClearValueOptimizable(resourceDescriptor)) {
+ zero.Format = resourceDescriptor.Format;
+ optimizedClearValue = &zero;
+ }
+
// TODO(bryan.bernhart@intel.com): Conditionally disable sub-allocation.
// For very large resources, there is no benefit to suballocate.
// For very small resources, it is inefficient to suballocate given the min. heap
// size could be much larger than the resource allocation.
// Attempt to satisfy the request using sub-allocation (placed resource in a heap).
ResourceHeapAllocation subAllocation;
- DAWN_TRY_ASSIGN(subAllocation,
- CreatePlacedResource(heapType, resourceDescriptor, initialUsage));
+ DAWN_TRY_ASSIGN(subAllocation, CreatePlacedResource(heapType, resourceDescriptor,
+ optimizedClearValue, initialUsage));
if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
return std::move(subAllocation);
}
@@ -184,7 +209,8 @@ namespace dawn_native { namespace d3d12 {
// If sub-allocation fails, fall-back to direct allocation (committed resource).
ResourceHeapAllocation directAllocation;
DAWN_TRY_ASSIGN(directAllocation,
- CreateCommittedResource(heapType, resourceDescriptor, initialUsage));
+ CreateCommittedResource(heapType, resourceDescriptor, optimizedClearValue,
+ initialUsage));
if (directAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
return std::move(directAllocation);
}
@@ -221,7 +247,7 @@ namespace dawn_native { namespace d3d12 {
// calls DeallocateMemory again using the same allocation.
allocation.Invalidate();
- ASSERT(allocation.GetD3D12Resource().Get() == nullptr);
+ ASSERT(allocation.GetD3D12Resource() == nullptr);
}
void ResourceAllocatorManager::FreeMemory(ResourceHeapAllocation& allocation) {
@@ -242,6 +268,7 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreatePlacedResource(
D3D12_HEAP_TYPE heapType,
const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
+ const D3D12_CLEAR_VALUE* optimizedClearValue,
D3D12_RESOURCE_STATES initialUsage) {
const ResourceHeapKind resourceHeapKind =
GetResourceHeapKind(requestedResourceDescriptor.Dimension, heapType,
@@ -252,12 +279,15 @@ namespace dawn_native { namespace d3d12 {
resourceHeapKind, requestedResourceDescriptor.SampleDesc.Count,
requestedResourceDescriptor.Alignment);
+ // TODO(bryan.bernhart): Figure out how to compute the alignment without calling this
+ // twice.
D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
// If the requested resource alignment was rejected, let D3D tell us what the
// required alignment is for this resource.
- if (resourceDescriptor.Alignment != resourceInfo.Alignment) {
+ if (resourceDescriptor.Alignment != 0 &&
+ resourceDescriptor.Alignment != resourceInfo.Alignment) {
resourceDescriptor.Alignment = 0;
resourceInfo =
mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
@@ -299,7 +329,7 @@ namespace dawn_native { namespace d3d12 {
DAWN_TRY(CheckOutOfMemoryHRESULT(
mDevice->GetD3D12Device()->CreatePlacedResource(
heap->GetD3D12Heap(), allocation.GetOffset(), &resourceDescriptor, initialUsage,
- nullptr, IID_PPV_ARGS(&placedResource)),
+ optimizedClearValue, IID_PPV_ARGS(&placedResource)),
"ID3D12Device::CreatePlacedResource"));
// After CreatePlacedResource has finished, the heap can be unlocked from residency. This
@@ -313,6 +343,7 @@ namespace dawn_native { namespace d3d12 {
ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreateCommittedResource(
D3D12_HEAP_TYPE heapType,
const D3D12_RESOURCE_DESC& resourceDescriptor,
+ const D3D12_CLEAR_VALUE* optimizedClearValue,
D3D12_RESOURCE_STATES initialUsage) {
D3D12_HEAP_PROPERTIES heapProperties;
heapProperties.Type = heapType;
@@ -345,11 +376,11 @@ namespace dawn_native { namespace d3d12 {
// Note: Heap flags are inferred by the resource descriptor and do not need to be explicitly
// provided to CreateCommittedResource.
ComPtr<ID3D12Resource> committedResource;
- DAWN_TRY(
- CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreateCommittedResource(
- &heapProperties, D3D12_HEAP_FLAG_NONE, &resourceDescriptor,
- initialUsage, nullptr, IID_PPV_ARGS(&committedResource)),
- "ID3D12Device::CreateCommittedResource"));
+ DAWN_TRY(CheckOutOfMemoryHRESULT(
+ mDevice->GetD3D12Device()->CreateCommittedResource(
+ &heapProperties, D3D12_HEAP_FLAG_NONE, &resourceDescriptor, initialUsage,
+ optimizedClearValue, IID_PPV_ARGS(&committedResource)),
+ "ID3D12Device::CreateCommittedResource"));
// When using CreateCommittedResource, D3D12 creates an implicit heap that contains the
// resource allocation. Because Dawn's memory residency management occurs at the resource
@@ -370,4 +401,10 @@ namespace dawn_native { namespace d3d12 {
/*offset*/ 0, std::move(committedResource), heap};
}
+ void ResourceAllocatorManager::DestroyPool() {
+ for (auto& alloc : mPooledHeapAllocators) {
+ alloc->DestroyPool();
+ }
+ }
+
}} // namespace dawn_native::d3d12
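With this change each heap kind is served by a chain of allocators: the BuddyMemoryAllocator sub-allocates out of heaps it requests from a PooledResourceMemoryAllocator, which recycles heaps created by the HeapAllocator instead of freeing them, until DestroyPool() releases them for real. A minimal sketch of that delegation pattern under simplified, hypothetical interfaces (not Dawn's actual class signatures):

    #include <memory>
    #include <vector>

    struct Heap {};  // simplified stand-in for a resource heap

    struct HeapSource {
        virtual ~HeapSource() = default;
        virtual std::unique_ptr<Heap> AllocateHeap() = 0;
        virtual void DeallocateHeap(std::unique_ptr<Heap> heap) = 0;
    };

    // Creates heaps directly (stands in for HeapAllocator).
    struct DirectHeapSource : HeapSource {
        std::unique_ptr<Heap> AllocateHeap() override {
            return std::make_unique<Heap>();
        }
        void DeallocateHeap(std::unique_ptr<Heap>) override {}  // freed on destruction
    };

    // Keeps returned heaps for reuse (stands in for PooledResourceMemoryAllocator).
    struct PooledHeapSource : HeapSource {
        explicit PooledHeapSource(HeapSource* next) : mNext(next) {}

        std::unique_ptr<Heap> AllocateHeap() override {
            if (!mPool.empty()) {
                auto heap = std::move(mPool.back());
                mPool.pop_back();
                return heap;  // reuse instead of re-creating
            }
            return mNext->AllocateHeap();
        }
        void DeallocateHeap(std::unique_ptr<Heap> heap) override {
            mPool.push_back(std::move(heap));  // recycle, do not free
        }
        void DestroyPool() {
            for (auto& heap : mPool) {
                mNext->DeallocateHeap(std::move(heap));
            }
            mPool.clear();
        }

        HeapSource* mNext;
        std::vector<std::unique_ptr<Heap>> mPool;
    };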
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h
index b60a024b27c..0bf2a02c61e 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceAllocatorManagerD3D12.h
@@ -17,6 +17,7 @@
#include "common/SerialQueue.h"
#include "dawn_native/BuddyMemoryAllocator.h"
+#include "dawn_native/PooledResourceMemoryAllocator.h"
#include "dawn_native/d3d12/HeapAllocatorD3D12.h"
#include "dawn_native/d3d12/ResourceHeapAllocationD3D12.h"
@@ -67,17 +68,21 @@ namespace dawn_native { namespace d3d12 {
void Tick(Serial lastCompletedSerial);
+ void DestroyPool();
+
private:
void FreeMemory(ResourceHeapAllocation& allocation);
ResultOrError<ResourceHeapAllocation> CreatePlacedResource(
D3D12_HEAP_TYPE heapType,
const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
+ const D3D12_CLEAR_VALUE* optimizedClearValue,
D3D12_RESOURCE_STATES initialUsage);
ResultOrError<ResourceHeapAllocation> CreateCommittedResource(
D3D12_HEAP_TYPE heapType,
const D3D12_RESOURCE_DESC& resourceDescriptor,
+ const D3D12_CLEAR_VALUE* optimizedClearValue,
D3D12_RESOURCE_STATES initialUsage);
Device* mDevice;
@@ -90,6 +95,9 @@ namespace dawn_native { namespace d3d12 {
mSubAllocatedResourceAllocators;
std::array<std::unique_ptr<HeapAllocator>, ResourceHeapKind::EnumCount> mHeapAllocators;
+ std::array<std::unique_ptr<PooledResourceMemoryAllocator>, ResourceHeapKind::EnumCount>
+ mPooledHeapAllocators;
+
SerialQueue<ResourceHeapAllocation> mAllocationsToDelete;
};
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
index c3a89f0590d..edf21c1e06d 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.cpp
@@ -32,8 +32,8 @@ namespace dawn_native { namespace d3d12 {
mResource.Reset();
}
- ComPtr<ID3D12Resource> ResourceHeapAllocation::GetD3D12Resource() const {
- return mResource;
+ ID3D12Resource* ResourceHeapAllocation::GetD3D12Resource() const {
+ return mResource.Get();
}
D3D12_GPU_VIRTUAL_ADDRESS ResourceHeapAllocation::GetGPUPointer() const {
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
index 71b00fd5fd7..ace1a7efa00 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ResourceHeapAllocationD3D12.h
@@ -33,7 +33,7 @@ namespace dawn_native { namespace d3d12 {
void Invalidate() override;
- ComPtr<ID3D12Resource> GetD3D12Resource() const;
+ ID3D12Resource* GetD3D12Resource() const;
D3D12_GPU_VIRTUAL_ADDRESS GetGPUPointer() const;
private:
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.cpp
index 224051a1c42..8ba3aab3197 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.cpp
@@ -164,4 +164,4 @@ namespace dawn_native { namespace d3d12 {
const SamplerHeapCacheEntry* b) const {
return a->mSamplers == b->mSamplers;
}
-}} // namespace dawn_native::d3d12 \ No newline at end of file
+}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.h
index 2f41086c8b2..2e5a2d4237a 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/SamplerHeapCacheD3D12.h
@@ -105,4 +105,4 @@ namespace dawn_native { namespace d3d12 {
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_SAMPLERHEAPCACHE_H_ \ No newline at end of file
+#endif // DAWNNATIVE_D3D12_SAMPLERHEAPCACHE_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
index 770a006fb42..0bdbafa12cb 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderModuleD3D12.cpp
@@ -31,7 +31,7 @@
namespace dawn_native { namespace d3d12 {
namespace {
- std::vector<const wchar_t*> GetDXCArguments(uint32_t compileFlags) {
+ std::vector<const wchar_t*> GetDXCArguments(uint32_t compileFlags, bool enable16BitTypes) {
std::vector<const wchar_t*> arguments;
if (compileFlags & D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY) {
arguments.push_back(L"/Gec");
@@ -70,9 +70,15 @@ namespace dawn_native { namespace d3d12 {
if (compileFlags & D3DCOMPILE_RESOURCES_MAY_ALIAS) {
arguments.push_back(L"/res_may_alias");
}
- // Enable FXC backward compatibility by setting the language version to 2016
- arguments.push_back(L"-HV");
- arguments.push_back(L"2016");
+
+ if (enable16BitTypes) {
+ // enable-16bit-types is only allowed with -HV 2018 (the default)
+ arguments.push_back(L"/enable-16bit-types");
+ } else {
+ // Enable FXC backward compatibility by setting the language version to 2016
+ arguments.push_back(L"-HV");
+ arguments.push_back(L"2016");
+ }
return arguments;
}
@@ -98,7 +104,12 @@ namespace dawn_native { namespace d3d12 {
shaderc_spvc::CompileOptions options = GetCompileOptions();
options.SetForceZeroInitializedVariables(true);
- options.SetHLSLShaderModel(51);
+ if (GetDevice()->IsExtensionEnabled(Extension::ShaderFloat16)) {
+ options.SetHLSLShaderModel(ToBackend(GetDevice())->GetDeviceInfo().shaderModel);
+ options.SetHLSLEnable16BitTypes(true);
+ } else {
+ options.SetHLSLShaderModel(51);
+ }
// PointCoord and PointSize are not supported in HLSL
// TODO (hao.x.li@intel.com): The point_coord_compat and point_size_compat are
// required temporarily for https://bugs.chromium.org/p/dawn/issues/detail?id=146,
@@ -138,7 +149,12 @@ namespace dawn_native { namespace d3d12 {
options_glsl.force_zero_initialized_variables = true;
spirv_cross::CompilerHLSL::Options options_hlsl;
- options_hlsl.shader_model = 51;
+ if (GetDevice()->IsExtensionEnabled(Extension::ShaderFloat16)) {
+ options_hlsl.shader_model = ToBackend(GetDevice())->GetDeviceInfo().shaderModel;
+ options_hlsl.enable_16bit_types = true;
+ } else {
+ options_hlsl.shader_model = 51;
+ }
// PointCoord and PointSize are not supported in HLSL
// TODO (hao.x.li@intel.com): The point_coord_compat and point_size_compat are
// required temporarily for https://bugs.chromium.org/p/dawn/issues/detail?id=146,
@@ -210,19 +226,6 @@ namespace dawn_native { namespace d3d12 {
const std::string& hlslSource,
const char* entryPoint,
uint32_t compileFlags) {
- const wchar_t* targetProfile = nullptr;
- switch (stage) {
- case SingleShaderStage::Vertex:
- targetProfile = L"vs_6_0";
- break;
- case SingleShaderStage::Fragment:
- targetProfile = L"ps_6_0";
- break;
- case SingleShaderStage::Compute:
- targetProfile = L"cs_6_0";
- break;
- }
-
IDxcLibrary* dxcLibrary;
DAWN_TRY_ASSIGN(dxcLibrary, ToBackend(GetDevice())->GetOrCreateDxcLibrary());
@@ -237,13 +240,16 @@ namespace dawn_native { namespace d3d12 {
std::wstring entryPointW;
DAWN_TRY_ASSIGN(entryPointW, ConvertStringToWstring(entryPoint));
- std::vector<const wchar_t*> arguments = GetDXCArguments(compileFlags);
+ std::vector<const wchar_t*> arguments = GetDXCArguments(
+ compileFlags, GetDevice()->IsExtensionEnabled(Extension::ShaderFloat16));
ComPtr<IDxcOperationResult> result;
- DAWN_TRY(CheckHRESULT(
- dxcCompiler->Compile(sourceBlob.Get(), nullptr, entryPointW.c_str(), targetProfile,
- arguments.data(), arguments.size(), nullptr, 0, nullptr, &result),
- "DXC compile"));
+ DAWN_TRY(
+ CheckHRESULT(dxcCompiler->Compile(
+ sourceBlob.Get(), nullptr, entryPointW.c_str(),
+ ToBackend(GetDevice())->GetDeviceInfo().shaderProfiles[stage].c_str(),
+ arguments.data(), arguments.size(), nullptr, 0, nullptr, &result),
+ "DXC compile"));
HRESULT hr;
DAWN_TRY(CheckHRESULT(result->GetStatus(&hr), "DXC get status"));
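The hard-coded vs_6_0/ps_6_0/cs_6_0 target profiles are replaced above by per-stage profiles taken from the device info, so the backend can target a higher shader model when 16-bit types are required. A hypothetical sketch of deriving such a profile string from a shader model packed as major * 10 + minor (the "51" convention visible in this patch); this is illustrative, not Dawn's actual helper:

    #include <cstdint>
    #include <string>

    // Hypothetical helper: build a DXC target profile such as L"ps_6_2" from a
    // stage prefix (L"vs", L"ps", L"cs") and a packed shader model, e.g. 51 or 62.
    std::wstring MakeTargetProfile(const wchar_t* stagePrefix, uint32_t shaderModel) {
        std::wstring profile = stagePrefix;
        profile += L'_';
        profile += static_cast<wchar_t>(L'0' + shaderModel / 10);  // major digit
        profile += L'_';
        profile += static_cast<wchar_t>(L'0' + shaderModel % 10);  // minor digit
        return profile;
    }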
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
index 9039d8ab9d7..ba1b4939a27 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
@@ -20,10 +20,32 @@
namespace dawn_native { namespace d3d12 {
+ // Limits the min/max heap sizes to known values for testing.
// Thresholds should be adjusted (lower == faster) to avoid tests taking too long to complete.
static constexpr const uint32_t kShaderVisibleSmallHeapSizes[] = {1024, 512};
- uint32_t GetD3D12ShaderVisibleHeapSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType, bool useSmallSize) {
+ uint32_t GetD3D12ShaderVisibleHeapMinSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType,
+ bool useSmallSize) {
+ if (useSmallSize) {
+ return kShaderVisibleSmallHeapSizes[heapType];
+ }
+
+ // The minimum heap size must be large enough to satisfy the largest descriptor
+ // allocation request and to amortize the cost of sub-allocation, but small enough
+ // to avoid wasting memory should only a tiny fraction ever be used.
+ // TODO(dawn:155): Figure out these values.
+ switch (heapType) {
+ case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+ return 4096;
+ case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+ return 256;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ uint32_t GetD3D12ShaderVisibleHeapMaxSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType,
+ bool useSmallSize) {
if (useSmallSize) {
return kShaderVisibleSmallHeapSizes[heapType];
}
@@ -62,7 +84,10 @@ namespace dawn_native { namespace d3d12 {
D3D12_DESCRIPTOR_HEAP_TYPE heapType)
: mHeapType(heapType),
mDevice(device),
- mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)) {
+ mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
+ mDescriptorCount(GetD3D12ShaderVisibleHeapMinSize(
+ heapType,
+ mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting))) {
ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
}
@@ -107,60 +132,74 @@ namespace dawn_native { namespace d3d12 {
mAllocator.Deallocate(completedSerial);
}
+ ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>>
+ ShaderVisibleDescriptorAllocator::AllocateHeap(uint32_t descriptorCount) const {
+ // The size in bytes of a descriptor heap is best calculated by the increment size
+ // multiplied by the number of descriptors. In practice, this is only an estimate and
+ // the actual size may vary depending on the driver.
+ const uint64_t kSize = mSizeIncrement * descriptorCount;
+
+ DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(kSize, MemorySegment::Local));
+
+ ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap;
+ D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
+ heapDescriptor.Type = mHeapType;
+ heapDescriptor.NumDescriptors = descriptorCount;
+ heapDescriptor.Flags = GetD3D12HeapFlags(mHeapType);
+ heapDescriptor.NodeMask = 0;
+ DAWN_TRY(CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreateDescriptorHeap(
+ &heapDescriptor, IID_PPV_ARGS(&d3d12DescriptorHeap)),
+ "ID3D12Device::CreateDescriptorHeap"));
+
+ std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap =
+ std::make_unique<ShaderVisibleDescriptorHeap>(std::move(d3d12DescriptorHeap), kSize);
+
+ // We must track the allocation in the LRU when it is created, otherwise the residency
+ // manager will see the allocation as non-resident in the later call to LockAllocation.
+ mDevice->GetResidencyManager()->TrackResidentAllocation(descriptorHeap.get());
+
+ return std::move(descriptorHeap);
+ }
+
// Creates a GPU descriptor heap that manages descriptors in a FIFO queue.
MaybeError ShaderVisibleDescriptorAllocator::AllocateAndSwitchShaderVisibleHeap() {
std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap;
- // Return the switched out heap to the pool and retrieve the oldest heap that is no longer
- // used by GPU. This maintains a heap buffer to avoid frequently re-creating heaps for heavy
- // users.
- // TODO(dawn:256): Consider periodically triming to avoid OOM.
+ // Dynamically allocate using a two-phase allocation strategy.
+ // The first phase grows a small heap in powers of two for light users, while the
+ // second phase pool-allocates maximum-sized heaps for heavy users.
if (mHeap != nullptr) {
mDevice->GetResidencyManager()->UnlockAllocation(mHeap.get());
- mPool.push_back({mDevice->GetPendingCommandSerial(), std::move(mHeap)});
- }
- // Recycle existing heap if possible.
- if (!mPool.empty() && mPool.front().heapSerial <= mDevice->GetCompletedCommandSerial()) {
- descriptorHeap = std::move(mPool.front().heap);
- mPool.pop_front();
+ const uint32_t maxDescriptorCount = GetD3D12ShaderVisibleHeapMaxSize(
+ mHeapType,
+ mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+ if (mDescriptorCount < maxDescriptorCount) {
+ // Phase #1. Grow the heaps in powers-of-two.
+ mDevice->ReferenceUntilUnused(mHeap->GetD3D12DescriptorHeap());
+ mDescriptorCount = std::min(mDescriptorCount * 2, maxDescriptorCount);
+ } else {
+ // Phase #2. Pool-allocate heaps.
+ // Return the switched out heap to the pool and retrieve the oldest heap that is no
+ // longer used by GPU. This maintains a heap buffer to avoid frequently re-creating
+ // heaps for heavy users.
+ // TODO(dawn:256): Consider periodically trimming to avoid OOM.
+ mPool.push_back({mDevice->GetPendingCommandSerial(), std::move(mHeap)});
+ if (mPool.front().heapSerial <= mDevice->GetCompletedCommandSerial()) {
+ descriptorHeap = std::move(mPool.front().heap);
+ mPool.pop_front();
+ }
+ }
}
- // TODO(bryan.bernhart@intel.com): Allocating to max heap size wastes memory
- // should the developer not allocate any bindings for the heap type.
- // Consider dynamically re-sizing GPU heaps.
- const uint32_t descriptorCount = GetD3D12ShaderVisibleHeapSize(
- mHeapType, mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
-
if (descriptorHeap == nullptr) {
- // The size in bytes of a descriptor heap is best calculated by the increment size
- // multiplied by the number of descriptors. In practice, this is only an estimate and
- // the actual size may vary depending on the driver.
- const uint64_t kSize = mSizeIncrement * descriptorCount;
-
- DAWN_TRY(
- mDevice->GetResidencyManager()->EnsureCanAllocate(kSize, MemorySegment::Local));
-
- ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap;
- D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
- heapDescriptor.Type = mHeapType;
- heapDescriptor.NumDescriptors = descriptorCount;
- heapDescriptor.Flags = GetD3D12HeapFlags(mHeapType);
- heapDescriptor.NodeMask = 0;
- DAWN_TRY(
- CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreateDescriptorHeap(
- &heapDescriptor, IID_PPV_ARGS(&d3d12DescriptorHeap)),
- "ID3D12Device::CreateDescriptorHeap"));
- descriptorHeap = std::make_unique<ShaderVisibleDescriptorHeap>(
- std::move(d3d12DescriptorHeap), kSize);
- // We must track the allocation in the LRU when it is created, otherwise the residency
- // manager will see the allocation as non-resident in the later call to LockAllocation.
- mDevice->GetResidencyManager()->TrackResidentAllocation(descriptorHeap.get());
+ DAWN_TRY_ASSIGN(descriptorHeap, AllocateHeap(mDescriptorCount));
}
DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(descriptorHeap.get()));
+
// Create a FIFO buffer from the recently created heap.
mHeap = std::move(descriptorHeap);
- mAllocator = RingBufferAllocator(descriptorCount);
+ mAllocator = RingBufferAllocator(mDescriptorCount);
// Invalidate all bindgroup allocations on previously bound heaps by incrementing the heap
// serial. When a bindgroup attempts to re-populate, it will compare with its recorded
@@ -209,4 +248,4 @@ namespace dawn_native { namespace d3d12 {
ID3D12DescriptorHeap* ShaderVisibleDescriptorHeap::GetD3D12DescriptorHeap() const {
return mD3d12DescriptorHeap.Get();
}
-}} // namespace dawn_native::d3d12 \ No newline at end of file
+}} // namespace dawn_native::d3d12
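The two-phase strategy above doubles mDescriptorCount on every heap switch until the per-type maximum is reached, after which switched-out heaps are recycled through mPool. A runnable sketch of the growth schedule; the 4096 minimum matches GetD3D12ShaderVisibleHeapMinSize for CBV/SRV/UAV heaps above, while the maximum here is a placeholder since GetD3D12ShaderVisibleHeapMaxSize's values are not shown in this hunk:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t descriptorCount = 4096;             // phase #1 starting size
        const uint32_t kMaxDescriptorCount = 1000000;  // assumed cap for illustration

        // Phase #1: each heap switch doubles the size until the cap is reached.
        while (descriptorCount < kMaxDescriptorCount) {
            descriptorCount = std::min(descriptorCount * 2, kMaxDescriptorCount);
            std::printf("grow to %u descriptors\n", descriptorCount);
        }
        // Phase #2: at the cap, switched-out heaps are pooled and reused instead.
        return 0;
    }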
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
index aec20a3b449..d93e57a0730 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
@@ -78,6 +78,9 @@ namespace dawn_native { namespace d3d12 {
std::unique_ptr<ShaderVisibleDescriptorHeap> heap;
};
+ ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>> AllocateHeap(
+ uint32_t descriptorCount) const;
+
std::unique_ptr<ShaderVisibleDescriptorHeap> mHeap;
RingBufferAllocator mAllocator;
std::list<SerialDescriptorHeap> mPool;
@@ -91,7 +94,11 @@ namespace dawn_native { namespace d3d12 {
Serial mHeapSerial = 0;
uint32_t mSizeIncrement;
+
+ // The descriptor count is the current size of the heap in number of descriptors.
+ // This is stored on the allocator to avoid extra conversions.
+ uint32_t mDescriptorCount = 0;
};
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_ \ No newline at end of file
+#endif // DAWNNATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
index b3aec3f1d9d..11c612ba82f 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingBufferD3D12.cpp
@@ -69,6 +69,6 @@ namespace dawn_native { namespace d3d12 {
}
ID3D12Resource* StagingBuffer::GetResource() const {
- return mUploadHeap.GetD3D12Resource().Get();
+ return mUploadHeap.GetD3D12Resource();
}
}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.cpp
index 84fd7880ca8..92a47412e96 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.cpp
@@ -149,4 +149,4 @@ namespace dawn_native { namespace d3d12 {
mAllocationsToDelete.ClearUpTo(completedSerial);
}
-}} // namespace dawn_native::d3d12 \ No newline at end of file
+}} // namespace dawn_native::d3d12
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h
index eebbe2a1649..519920b4655 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h
@@ -82,4 +82,4 @@ namespace dawn_native { namespace d3d12 {
}} // namespace dawn_native::d3d12
-#endif // DAWNNATIVE_D3D12_STAGINGDESCRIPTORALLOCATOR_H_ \ No newline at end of file
+#endif // DAWNNATIVE_D3D12_STAGINGDESCRIPTORALLOCATOR_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp
index f2a8388220e..e1d380c5fe9 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.cpp
@@ -21,7 +21,7 @@
namespace dawn_native { namespace d3d12 {
namespace {
- Origin3D ComputeTexelOffsets(const Format& format,
+ Origin3D ComputeTexelOffsets(const TexelBlockInfo& blockInfo,
uint32_t offset,
uint32_t bytesPerRow,
uint32_t slicePitch) {
@@ -32,20 +32,20 @@ namespace dawn_native { namespace d3d12 {
uint32_t byteOffsetY = offset % slicePitch;
uint32_t byteOffsetZ = offset - byteOffsetY;
- return {byteOffsetX / format.blockByteSize * format.blockWidth,
- byteOffsetY / bytesPerRow * format.blockHeight, byteOffsetZ / slicePitch};
+ return {byteOffsetX / blockInfo.blockByteSize * blockInfo.blockWidth,
+ byteOffsetY / bytesPerRow * blockInfo.blockHeight, byteOffsetZ / slicePitch};
}
} // namespace
- TextureCopySplit ComputeTextureCopySplit(Origin3D origin,
- Extent3D copySize,
- const Format& format,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage) {
- TextureCopySplit copy;
+ Texture2DCopySplit ComputeTextureCopySplit(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage) {
+ Texture2DCopySplit copy;
- ASSERT(bytesPerRow % format.blockByteSize == 0);
+ ASSERT(bytesPerRow % blockInfo.blockByteSize == 0);
uint64_t alignedOffset =
offset & ~static_cast<uint64_t>(D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT - 1);
@@ -71,12 +71,14 @@ namespace dawn_native { namespace d3d12 {
ASSERT(alignedOffset < offset);
ASSERT(offset - alignedOffset < D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
- uint32_t slicePitch = bytesPerRow * (rowsPerImage / format.blockHeight);
+ uint32_t slicePitch = bytesPerRow * (rowsPerImage / blockInfo.blockHeight);
Origin3D texelOffset = ComputeTexelOffsets(
- format, static_cast<uint32_t>(offset - alignedOffset), bytesPerRow, slicePitch);
+ blockInfo, static_cast<uint32_t>(offset - alignedOffset), bytesPerRow, slicePitch);
- uint32_t copyBytesPerRowPitch = copySize.width / format.blockWidth * format.blockByteSize;
- uint32_t byteOffsetInRowPitch = texelOffset.x / format.blockWidth * format.blockByteSize;
+ uint32_t copyBytesPerRowPitch =
+ copySize.width / blockInfo.blockWidth * blockInfo.blockByteSize;
+ uint32_t byteOffsetInRowPitch =
+ texelOffset.x / blockInfo.blockWidth * blockInfo.blockByteSize;
if (copyBytesPerRowPitch + byteOffsetInRowPitch <= bytesPerRow) {
// The region's rows fit inside the bytes per row. In this case, extend the width of the
// PlacedFootprint and copy the buffer with an offset location
@@ -154,7 +156,7 @@ namespace dawn_native { namespace d3d12 {
copy.copies[0].textureOffset = origin;
ASSERT(bytesPerRow > byteOffsetInRowPitch);
- uint32_t texelsPerRow = bytesPerRow / format.blockByteSize * format.blockWidth;
+ uint32_t texelsPerRow = bytesPerRow / blockInfo.blockByteSize * blockInfo.blockWidth;
copy.copies[0].copySize.width = texelsPerRow - texelOffset.x;
copy.copies[0].copySize.height = copySize.height;
copy.copies[0].copySize.depth = copySize.depth;
@@ -174,13 +176,59 @@ namespace dawn_native { namespace d3d12 {
copy.copies[1].copySize.depth = copySize.depth;
copy.copies[1].bufferOffset.x = 0;
- copy.copies[1].bufferOffset.y = texelOffset.y + format.blockHeight;
+ copy.copies[1].bufferOffset.y = texelOffset.y + blockInfo.blockHeight;
copy.copies[1].bufferOffset.z = texelOffset.z;
copy.copies[1].bufferSize.width = copy.copies[1].copySize.width;
- copy.copies[1].bufferSize.height = rowsPerImage + texelOffset.y + format.blockHeight;
+ copy.copies[1].bufferSize.height = rowsPerImage + texelOffset.y + blockInfo.blockHeight;
copy.copies[1].bufferSize.depth = copySize.depth + texelOffset.z;
return copy;
}
+ TextureCopySplits ComputeTextureCopySplits(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage) {
+ TextureCopySplits copies;
+
+ const uint64_t bytesPerSlice = bytesPerRow * (rowsPerImage / blockInfo.blockHeight);
+
+ // The function ComputeTextureCopySplit() decides how to split the copy based on:
+ // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
+ // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PITCH_ALIGNMENT (256)
+ // Each slice of a 2D array or 3D copy might need to be split, but because of the WebGPU
+ // constraint that "bytesPerRow" must be a multiple of 256, all odd (resp. all even) slices
+ // will be at an offset multiple of 512 of each other, which means they will all result in
+ // the same 2D split. Thus we can just compute the copy splits for the first and second
+ // slices, and reuse them for the remaining slices by adding the related offset of each
+ // slice. Moreover, if "rowsPerImage" is even, both the first and second copy layers can
+ // share the same copy split, so in this situation we only need to compute the
+ // copy split once and reuse it for all the slices.
+ const dawn_native::Extent3D copyOneLayerSize = {copySize.width, copySize.height, 1};
+ const dawn_native::Origin3D copyFirstLayerOrigin = {origin.x, origin.y, 0};
+
+ copies.copies2D[0] = ComputeTextureCopySplit(copyFirstLayerOrigin, copyOneLayerSize,
+ blockInfo, offset, bytesPerRow, rowsPerImage);
+
+ // When the copy refers to only one texture 2D array layer, copies.copies2D[1] will
+ // never be used, so we can safely early-return here.
+ if (copySize.depth == 1) {
+ return copies;
+ }
+
+ if (bytesPerSlice % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0) {
+ copies.copies2D[1] = copies.copies2D[0];
+ copies.copies2D[1].offset += bytesPerSlice;
+ } else {
+ const uint64_t bufferOffsetNextLayer = offset + bytesPerSlice;
+ copies.copies2D[1] =
+ ComputeTextureCopySplit(copyFirstLayerOrigin, copyOneLayerSize, blockInfo,
+ bufferOffsetNextLayer, bytesPerRow, rowsPerImage);
+ }
+
+ return copies;
+ }
+
}} // namespace dawn_native::d3d12
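The "all odd (resp. all even) slices" argument above is modular arithmetic: WebGPU requires bytesPerRow to be a multiple of 256, so bytesPerSlice is too, and successive slice offsets therefore cycle through at most two residues modulo the 512-byte placement alignment. A worked check of that claim:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint64_t kPlacementAlignment = 512;  // D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT
        const uint64_t bytesPerRow = 256;          // WebGPU: multiple of 256
        const uint64_t rowsPerImage = 7;           // odd row count per slice
        const uint64_t bytesPerSlice = bytesPerRow * rowsPerImage;  // 1792

        // Offsets of slices 0, 1, 2, ... modulo 512 alternate between two
        // values, so the 2D splits computed for slices 0 and 1 cover all
        // remaining slices after adding each slice's offset.
        uint64_t offset = 0;
        uint64_t residue0 = offset % kPlacementAlignment;                    // 0
        uint64_t residue1 = (offset + bytesPerSlice) % kPlacementAlignment;  // 256
        for (uint32_t slice = 2; slice < 8; ++slice) {
            uint64_t r = (offset + slice * bytesPerSlice) % kPlacementAlignment;
            assert(r == ((slice % 2 == 0) ? residue0 : residue1));
        }
        return 0;
    }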
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h
index cd7037bc0b4..962c33239df 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureCopySplitter.h
@@ -21,13 +21,13 @@
namespace dawn_native {
- struct Format;
+ struct TexelBlockInfo;
} // namespace dawn_native
namespace dawn_native { namespace d3d12 {
- struct TextureCopySplit {
+ struct Texture2DCopySplit {
static constexpr unsigned int kMaxTextureCopyRegions = 2;
struct CopyInfo {
@@ -43,12 +43,25 @@ namespace dawn_native { namespace d3d12 {
std::array<CopyInfo, kMaxTextureCopyRegions> copies;
};
- TextureCopySplit ComputeTextureCopySplit(Origin3D origin,
- Extent3D copySize,
- const Format& format,
- uint64_t offset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage);
+ struct TextureCopySplits {
+ static constexpr uint32_t kMaxTextureCopySplits = 2;
+
+ std::array<Texture2DCopySplit, kMaxTextureCopySplits> copies2D;
+ };
+
+ Texture2DCopySplit ComputeTextureCopySplit(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage);
+
+ TextureCopySplits ComputeTextureCopySplits(Origin3D origin,
+ Extent3D copySize,
+ const TexelBlockInfo& blockInfo,
+ uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage);
}} // namespace dawn_native::d3d12
#endif // DAWNNATIVE_D3D12_TEXTURECOPYSPLITTER_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
index 7b86367c62c..3d87186e4ac 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.cpp
@@ -17,6 +17,7 @@
#include "common/Constants.h"
#include "common/Math.h"
#include "dawn_native/DynamicUploader.h"
+#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/Error.h"
#include "dawn_native/d3d12/BufferD3D12.h"
#include "dawn_native/d3d12/CommandRecordingContext.h"
@@ -78,13 +79,7 @@ namespace dawn_native { namespace d3d12 {
// A multisampled resource must have either D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET or
// D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL set in D3D12_RESOURCE_DESC::Flags.
// https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_resource_desc
- // Currently all textures are zero-initialized via the render-target path so always add
- // the render target flag, except for compressed textures for which the render-target
- // flag is invalid.
- // TODO(natlee@microsoft.com, jiawei.shao@intel.com): do not require render target for
- // lazy clearing.
- if ((usage & wgpu::TextureUsage::OutputAttachment) || isMultisampledTexture ||
- !format.isCompressed) {
+ if ((usage & wgpu::TextureUsage::OutputAttachment) != 0 || isMultisampledTexture) {
if (format.HasDepthOrStencil()) {
flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
} else {
@@ -149,8 +144,10 @@ namespace dawn_native { namespace d3d12 {
case wgpu::TextureFormat::RGB10A2Unorm:
return DXGI_FORMAT_R10G10B10A2_TYPELESS;
- case wgpu::TextureFormat::RG11B10Float:
+ case wgpu::TextureFormat::RG11B10Ufloat:
return DXGI_FORMAT_R11G11B10_FLOAT;
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
case wgpu::TextureFormat::RG32Uint:
case wgpu::TextureFormat::RG32Sint:
@@ -263,8 +260,10 @@ namespace dawn_native { namespace d3d12 {
return DXGI_FORMAT_B8G8R8A8_UNORM_SRGB;
case wgpu::TextureFormat::RGB10A2Unorm:
return DXGI_FORMAT_R10G10B10A2_UNORM;
- case wgpu::TextureFormat::RG11B10Float:
+ case wgpu::TextureFormat::RG11B10Ufloat:
return DXGI_FORMAT_R11G11B10_FLOAT;
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
case wgpu::TextureFormat::RG32Uint:
return DXGI_FORMAT_R32G32_UINT;
@@ -393,12 +392,6 @@ namespace dawn_native { namespace d3d12 {
const TextureDescriptor* textureDescriptor =
reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
- // TODO(dawn:22): Remove once migration from GPUTextureDescriptor.arrayLayerCount to
- // GPUTextureDescriptor.size.depth is done.
- TextureDescriptor fixedDescriptor;
- DAWN_TRY_ASSIGN(fixedDescriptor, FixTextureDescriptor(device, textureDescriptor));
- textureDescriptor = &fixedDescriptor;
-
Ref<Texture> dawnTexture =
AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedExternal));
DAWN_TRY(dawnTexture->InitializeAsExternalTexture(textureDescriptor, sharedHandle,
@@ -527,7 +520,7 @@ namespace dawn_native { namespace d3d12 {
if (mSwapChainTexture) {
ID3D12SharingContract* d3dSharingContract = device->GetSharingContract();
if (d3dSharingContract != nullptr) {
- d3dSharingContract->Present(mResourceAllocation.GetD3D12Resource().Get(), 0, 0);
+ d3dSharingContract->Present(mResourceAllocation.GetD3D12Resource(), 0, 0);
}
}
@@ -544,7 +537,27 @@ namespace dawn_native { namespace d3d12 {
}
ID3D12Resource* Texture::GetD3D12Resource() const {
- return mResourceAllocation.GetD3D12Resource().Get();
+ return mResourceAllocation.GetD3D12Resource();
+ }
+
+ DXGI_FORMAT Texture::GetD3D12CopyableSubresourceFormat(Aspect aspect) const {
+ ASSERT(GetFormat().aspects & aspect);
+
+ switch (GetFormat().format) {
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ switch (aspect) {
+ case Aspect::Depth:
+ return DXGI_FORMAT_R32_FLOAT;
+ case Aspect::Stencil:
+ return DXGI_FORMAT_R8_UINT;
+ default:
+ UNREACHABLE();
+ return GetD3D12Format();
+ }
+ default:
+ ASSERT(HasOneBit(GetFormat().aspects));
+ return GetD3D12Format();
+ }
}
void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
@@ -574,7 +587,15 @@ namespace dawn_native { namespace d3d12 {
}
std::vector<D3D12_RESOURCE_BARRIER> barriers;
- barriers.reserve(range.levelCount * range.layerCount);
+
+ // TODO(enga): Consider adding a Count helper.
+ uint32_t aspectCount = 0;
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ aspectCount++;
+ DAWN_UNUSED(aspect);
+ }
+
+ barriers.reserve(range.levelCount * range.layerCount * aspectCount);
TransitionUsageAndGetResourceBarrier(commandContext, &barriers, newState, range);
if (barriers.size()) {
@@ -652,19 +673,6 @@ namespace dawn_native { namespace d3d12 {
barrier.Transition.Subresource =
allSubresources ? D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES : index;
barriers->push_back(barrier);
- // TODO(yunchao.he@intel.com): support subresource for depth/stencil. Depth stencil
- // texture has different plane slices. While the current implementation only has differernt
- // mip slices and array slices for subresources.
- // This is a hack because Dawn doesn't handle subresource of multiplanar resources
- // correctly. We force the transition to be the same for all planes to match what the
- // frontend validation checks for. This hack might be incorrect for stencil-only texture
- // because we always set transition barrier for depth plane.
- if (!allSubresources && newState == D3D12_RESOURCE_STATE_DEPTH_WRITE &&
- GetFormat().HasStencil()) {
- D3D12_RESOURCE_BARRIER barrierStencil = barrier;
- barrierStencil.Transition.Subresource += GetArrayLayers() * GetNumMipLevels();
- barriers->push_back(barrierStencil);
- }
state->isValidToDecay = false;
}
@@ -686,7 +694,6 @@ namespace dawn_native { namespace d3d12 {
HandleTransitionSpecialCases(commandContext);
const Serial pendingCommandSerial = ToBackend(GetDevice())->GetPendingCommandSerial();
- uint32_t subresourceCount = GetSubresourceCount();
// These transitions assume the texture is 2D
ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
@@ -695,25 +702,29 @@ namespace dawn_native { namespace d3d12 {
        // are the same, then we can use one barrier to perform the state transition for all
        // subresources. Note that if the texture has only one mip level and one array slice,
        // it will fall into this category.
- bool areAllSubresourcesCovered = range.levelCount * range.layerCount == subresourceCount;
+ bool areAllSubresourcesCovered = (range.levelCount == GetNumMipLevels() && //
+ range.layerCount == GetArrayLayers() && //
+ range.aspects == GetFormat().aspects);
if (mSameLastUsagesAcrossSubresources && areAllSubresourcesCovered) {
TransitionSingleOrAllSubresources(barriers, 0, newState, pendingCommandSerial, true);
// TODO(yunchao.he@intel.com): compress and decompress if all subresources have the
// same states. We may need to retain mSubresourceStateAndDecay[0] only.
- for (uint32_t i = 1; i < subresourceCount; ++i) {
+ for (uint32_t i = 1; i < GetSubresourceCount(); ++i) {
mSubresourceStateAndDecay[i] = mSubresourceStateAndDecay[0];
}
return;
}
- for (uint32_t arrayLayer = 0; arrayLayer < range.layerCount; ++arrayLayer) {
- for (uint32_t mipLevel = 0; mipLevel < range.levelCount; ++mipLevel) {
- uint32_t index = GetSubresourceIndex(range.baseMipLevel + mipLevel,
- range.baseArrayLayer + arrayLayer);
-
- TransitionSingleOrAllSubresources(barriers, index, newState, pendingCommandSerial,
- false);
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ for (uint32_t arrayLayer = 0; arrayLayer < range.layerCount; ++arrayLayer) {
+ for (uint32_t mipLevel = 0; mipLevel < range.levelCount; ++mipLevel) {
+ uint32_t index = GetSubresourceIndex(range.baseMipLevel + mipLevel,
+ range.baseArrayLayer + arrayLayer, aspect);
+
+ TransitionSingleOrAllSubresources(barriers, index, newState,
+ pendingCommandSerial, false);
+ }
}
}
mSameLastUsagesAcrossSubresources = areAllSubresourcesCovered;
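
The three-argument GetSubresourceIndex used above follows D3D12's subresource numbering: mip levels vary fastest, then array layers, then plane slices. A sketch of the flat-index math, mirroring D3D12CalcSubresource; mapping Depth to plane 0 and Stencil to plane 1 is an assumption for illustration:

    #include <cstdint>

    // Flat subresource index in D3D12CalcSubresource order: mips vary fastest,
    // then array layers, then plane slices.
    uint32_t CalcSubresource(uint32_t mipLevel,
                             uint32_t arrayLayer,
                             uint32_t planeSlice,
                             uint32_t mipLevelCount,
                             uint32_t arrayLayerCount) {
        return mipLevel + mipLevelCount * (arrayLayer + arrayLayerCount * planeSlice);
    }
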
@@ -723,6 +734,12 @@ namespace dawn_native { namespace d3d12 {
CommandRecordingContext* commandContext,
std::vector<D3D12_RESOURCE_BARRIER>* barriers,
const PassTextureUsage& textureUsages) {
+ if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
+ // Track the underlying heap to ensure residency.
+ Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+ commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
+ }
+
HandleTransitionSpecialCases(commandContext);
const Serial pendingCommandSerial = ToBackend(GetDevice())->GetPendingCommandSerial();
@@ -748,20 +765,22 @@ namespace dawn_native { namespace d3d12 {
return;
}
- for (uint32_t arrayLayer = 0; arrayLayer < GetArrayLayers(); ++arrayLayer) {
- for (uint32_t mipLevel = 0; mipLevel < GetNumMipLevels(); ++mipLevel) {
- uint32_t index = GetSubresourceIndex(mipLevel, arrayLayer);
+ for (Aspect aspect : IterateEnumMask(GetFormat().aspects)) {
+ for (uint32_t arrayLayer = 0; arrayLayer < GetArrayLayers(); ++arrayLayer) {
+ for (uint32_t mipLevel = 0; mipLevel < GetNumMipLevels(); ++mipLevel) {
+ uint32_t index = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
- // Skip if this subresource is not used during the current pass
- if (textureUsages.subresourceUsages[index] == wgpu::TextureUsage::None) {
- continue;
- }
+ // Skip if this subresource is not used during the current pass
+ if (textureUsages.subresourceUsages[index] == wgpu::TextureUsage::None) {
+ continue;
+ }
- D3D12_RESOURCE_STATES newState =
- D3D12TextureUsage(textureUsages.subresourceUsages[index], GetFormat());
+ D3D12_RESOURCE_STATES newState =
+ D3D12TextureUsage(textureUsages.subresourceUsages[index], GetFormat());
- TransitionSingleOrAllSubresources(barriers, index, newState, pendingCommandSerial,
- false);
+ TransitionSingleOrAllSubresources(barriers, index, newState,
+ pendingCommandSerial, false);
+ }
}
}
mSameLastUsagesAcrossSubresources = textureUsages.sameUsagesAcrossSubresources;
@@ -834,20 +853,38 @@ namespace dawn_native { namespace d3d12 {
uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
- if (GetFormat().isRenderable) {
+ if ((GetUsage() & wgpu::TextureUsage::OutputAttachment) != 0) {
if (GetFormat().HasDepthOrStencil()) {
TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_DEPTH_WRITE, range);
- D3D12_CLEAR_FLAGS clearFlags = {};
-
for (uint32_t level = range.baseMipLevel;
level < range.baseMipLevel + range.levelCount; ++level) {
for (uint32_t layer = range.baseArrayLayer;
layer < range.baseArrayLayer + range.layerCount; ++layer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleSubresource(level, layer))) {
- // Skip lazy clears if already initialized.
+ // Iterate the aspects individually to determine which clear flags to use.
+ D3D12_CLEAR_FLAGS clearFlags = {};
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+
+ switch (aspect) {
+ case Aspect::Depth:
+ clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
+ break;
+ case Aspect::Stencil:
+ clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ if (clearFlags == 0) {
continue;
}
@@ -860,13 +897,6 @@ namespace dawn_native { namespace d3d12 {
device->GetD3D12Device()->CreateDepthStencilView(GetD3D12Resource(),
&dsvDesc, baseDescriptor);
- if (GetFormat().HasDepth()) {
- clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
- }
- if (GetFormat().HasStencil()) {
- clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
- }
-
commandList->ClearDepthStencilView(baseDescriptor, clearFlags, fClearColor,
clearColor, 0, nullptr);
}
@@ -878,13 +908,14 @@ namespace dawn_native { namespace d3d12 {
const float clearColorRGBA[4] = {fClearColor, fClearColor, fClearColor,
fClearColor};
+ ASSERT(range.aspects == Aspect::Color);
for (uint32_t level = range.baseMipLevel;
level < range.baseMipLevel + range.levelCount; ++level) {
for (uint32_t layer = range.baseArrayLayer;
layer < range.baseArrayLayer + range.layerCount; ++layer) {
if (clearValue == TextureBase::ClearValue::Zero &&
IsSubresourceContentInitialized(
- SubresourceRange::SingleSubresource(level, layer))) {
+ SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
// Skip lazy clears if already initialized.
continue;
}
@@ -902,59 +933,64 @@ namespace dawn_native { namespace d3d12 {
}
}
} else {
- // TODO(natlee@microsoft.com): test compressed textures are cleared
            // Create a temporary buffer filled with the clear color to copy to the texture image
- uint32_t bytesPerRow =
- Align((GetWidth() / GetFormat().blockWidth) * GetFormat().blockByteSize,
- kTextureBytesPerRowAlignment);
- uint64_t bufferSize64 = bytesPerRow * (GetHeight() / GetFormat().blockHeight);
- if (bufferSize64 > std::numeric_limits<uint32_t>::max()) {
- return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
- }
- uint32_t bufferSize = static_cast<uint32_t>(bufferSize64);
- DynamicUploader* uploader = device->GetDynamicUploader();
- UploadHandle uploadHandle;
- DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(bufferSize, device->GetPendingCommandSerial()));
- memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
-
TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_COPY_DEST, range);
- for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
- ++level) {
- // compute d3d12 texture copy locations for texture and buffer
- Extent3D copySize = GetMipLevelVirtualSize(level);
-
- uint32_t rowsPerImage = GetHeight();
- TextureCopySplit copySplit =
- ComputeTextureCopySplit({0, 0, 0}, copySize, GetFormat(),
- uploadHandle.startOffset, bytesPerRow, rowsPerImage);
-
- for (uint32_t layer = range.baseArrayLayer;
- layer < range.baseArrayLayer + range.layerCount; ++layer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleSubresource(level, layer))) {
- // Skip lazy clears if already initialized.
- continue;
- }
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ const TexelBlockInfo& blockInfo = GetFormat().GetTexelBlockInfo(aspect);
+
+ uint32_t bytesPerRow =
+ Align((GetWidth() / blockInfo.blockWidth) * blockInfo.blockByteSize,
+ kTextureBytesPerRowAlignment);
+ uint64_t bufferSize64 = bytesPerRow * (GetHeight() / blockInfo.blockHeight);
+ if (bufferSize64 > std::numeric_limits<uint32_t>::max()) {
+ return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
+ }
+ uint32_t bufferSize = static_cast<uint32_t>(bufferSize64);
+
+ DynamicUploader* uploader = device->GetDynamicUploader();
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle,
+ uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
+ GetFormat().blockByteSize));
+ memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
- D3D12_TEXTURE_COPY_LOCATION textureLocation =
- ComputeTextureCopyLocationForTexture(this, level, layer);
- for (uint32_t i = 0; i < copySplit.count; ++i) {
- TextureCopySplit::CopyInfo& info = copySplit.copies[i];
-
- D3D12_TEXTURE_COPY_LOCATION bufferLocation =
- ComputeBufferLocationForCopyTextureRegion(
- this, ToBackend(uploadHandle.stagingBuffer)->GetResource(),
- info.bufferSize, copySplit.offset, bytesPerRow);
- D3D12_BOX sourceRegion =
- ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
-
- // copy the buffer filled with clear color to the texture
- commandList->CopyTextureRegion(&textureLocation, info.textureOffset.x,
- info.textureOffset.y, info.textureOffset.z,
- &bufferLocation, &sourceRegion);
+ for (uint32_t level = range.baseMipLevel;
+ level < range.baseMipLevel + range.levelCount; ++level) {
+ // compute d3d12 texture copy locations for texture and buffer
+ Extent3D copySize = GetMipLevelVirtualSize(level);
+
+ uint32_t rowsPerImage = GetHeight();
+ Texture2DCopySplit copySplit = ComputeTextureCopySplit(
+ {0, 0, 0}, copySize, blockInfo, uploadHandle.startOffset, bytesPerRow,
+ rowsPerImage);
+
+ for (uint32_t layer = range.baseArrayLayer;
+ layer < range.baseArrayLayer + range.layerCount; ++layer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+
+ D3D12_TEXTURE_COPY_LOCATION textureLocation =
+ ComputeTextureCopyLocationForTexture(this, level, layer, aspect);
+ for (uint32_t i = 0; i < copySplit.count; ++i) {
+ Texture2DCopySplit::CopyInfo& info = copySplit.copies[i];
+
+ D3D12_TEXTURE_COPY_LOCATION bufferLocation =
+ ComputeBufferLocationForCopyTextureRegion(
+ this, ToBackend(uploadHandle.stagingBuffer)->GetResource(),
+ info.bufferSize, copySplit.offset, bytesPerRow, aspect);
+ D3D12_BOX sourceRegion =
+ ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
+
+ // copy the buffer filled with clear color to the texture
+ commandList->CopyTextureRegion(
+ &textureLocation, info.textureOffset.x, info.textureOffset.y,
+ info.textureOffset.z, &bufferLocation, &sourceRegion);
+ }
}
}
}
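
In the non-renderable clear path above, the staging buffer is sized per aspect from a row pitch aligned to the D3D12 requirement. A worked sketch of that arithmetic, assuming kTextureBytesPerRowAlignment is D3D12's 256-byte row pitch and using an illustrative 300x300 texture with a 1-byte-per-texel aspect:

    #include <cassert>
    #include <cstdint>

    // Round `value` up to a power-of-two `alignment`.
    constexpr uint32_t Align(uint32_t value, uint32_t alignment) {
        return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
        constexpr uint32_t kTextureBytesPerRowAlignment = 256;  // assumed D3D12 row pitch
        // 300 texels per row, 1 byte per texel: 300 bytes padded up to 512.
        uint32_t bytesPerRow = Align(300 * 1, kTextureBytesPerRowAlignment);
        assert(bytesPerRow == 512);
        // 300 rows of padded data: 153600 bytes for the staging buffer.
        uint64_t bufferSize = uint64_t(bytesPerRow) * 300;
        assert(bufferSize == 153600);
        return 0;
    }
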
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
index ef17bf40628..d2df6bea69b 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/TextureD3D12.h
@@ -48,6 +48,7 @@ namespace dawn_native { namespace d3d12 {
DXGI_FORMAT GetD3D12Format() const;
ID3D12Resource* GetD3D12Resource() const;
+ DXGI_FORMAT GetD3D12CopyableSubresourceFormat(Aspect aspect) const;
D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor(uint32_t mipLevel,
uint32_t baseArrayLayer,
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
index d8c20ef1613..283727486b8 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.cpp
@@ -15,6 +15,9 @@
#include "dawn_native/d3d12/UtilsD3D12.h"
#include "common/Assert.h"
+#include "dawn_native/Format.h"
+#include "dawn_native/d3d12/BufferD3D12.h"
+#include "dawn_native/d3d12/CommandRecordingContext.h"
#include <stringapiset.h>
@@ -64,11 +67,12 @@ namespace dawn_native { namespace d3d12 {
D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
uint32_t level,
- uint32_t slice) {
+ uint32_t slice,
+ Aspect aspect) {
D3D12_TEXTURE_COPY_LOCATION copyLocation;
copyLocation.pResource = texture->GetD3D12Resource();
copyLocation.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
- copyLocation.SubresourceIndex = texture->GetSubresourceIndex(level, slice);
+ copyLocation.SubresourceIndex = texture->GetSubresourceIndex(level, slice, aspect);
return copyLocation;
}
@@ -78,12 +82,14 @@ namespace dawn_native { namespace d3d12 {
ID3D12Resource* bufferResource,
const Extent3D& bufferSize,
const uint64_t offset,
- const uint32_t rowPitch) {
+ const uint32_t rowPitch,
+ Aspect aspect) {
D3D12_TEXTURE_COPY_LOCATION bufferLocation;
bufferLocation.pResource = bufferResource;
bufferLocation.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
bufferLocation.PlacedFootprint.Offset = offset;
- bufferLocation.PlacedFootprint.Footprint.Format = texture->GetD3D12Format();
+ bufferLocation.PlacedFootprint.Footprint.Format =
+ texture->GetD3D12CopyableSubresourceFormat(aspect);
bufferLocation.PlacedFootprint.Footprint.Width = bufferSize.width;
bufferLocation.PlacedFootprint.Footprint.Height = bufferSize.height;
bufferLocation.PlacedFootprint.Footprint.Depth = bufferSize.depth;
@@ -102,4 +108,112 @@ namespace dawn_native { namespace d3d12 {
return sourceRegion;
}
+ bool IsTypeless(DXGI_FORMAT format) {
+ // List generated from <dxgiformat.h>
+ switch (format) {
+ case DXGI_FORMAT_R32G32B32A32_TYPELESS:
+ case DXGI_FORMAT_R32G32B32_TYPELESS:
+ case DXGI_FORMAT_R16G16B16A16_TYPELESS:
+ case DXGI_FORMAT_R32G32_TYPELESS:
+ case DXGI_FORMAT_R32G8X24_TYPELESS:
+ case DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS:
+ case DXGI_FORMAT_R10G10B10A2_TYPELESS:
+ case DXGI_FORMAT_R8G8B8A8_TYPELESS:
+ case DXGI_FORMAT_R16G16_TYPELESS:
+ case DXGI_FORMAT_R32_TYPELESS:
+ case DXGI_FORMAT_R24G8_TYPELESS:
+ case DXGI_FORMAT_R24_UNORM_X8_TYPELESS:
+ case DXGI_FORMAT_R8G8_TYPELESS:
+ case DXGI_FORMAT_R16_TYPELESS:
+ case DXGI_FORMAT_R8_TYPELESS:
+ case DXGI_FORMAT_BC1_TYPELESS:
+ case DXGI_FORMAT_BC2_TYPELESS:
+ case DXGI_FORMAT_BC3_TYPELESS:
+ case DXGI_FORMAT_BC4_TYPELESS:
+ case DXGI_FORMAT_BC5_TYPELESS:
+ case DXGI_FORMAT_B8G8R8A8_TYPELESS:
+ case DXGI_FORMAT_B8G8R8X8_TYPELESS:
+ case DXGI_FORMAT_BC6H_TYPELESS:
+ case DXGI_FORMAT_BC7_TYPELESS:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ void RecordCopyBufferToTextureFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
+ const Texture2DCopySplit& baseCopySplit,
+ ID3D12Resource* bufferResource,
+ uint64_t baseOffsetBytes,
+ uint64_t bufferBytesPerRow,
+ Texture* texture,
+ uint32_t textureMiplevel,
+ uint32_t textureSlice,
+ Aspect aspect) {
+ ASSERT(HasOneBit(aspect));
+ const D3D12_TEXTURE_COPY_LOCATION textureLocation =
+ ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureSlice, aspect);
+
+ const uint64_t offsetBytes = baseCopySplit.offset + baseOffsetBytes;
+
+ for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
+ const Texture2DCopySplit::CopyInfo& info = baseCopySplit.copies[i];
+
+ // TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
+ // members in Texture2DCopySplit::CopyInfo.
+ const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
+ ComputeBufferLocationForCopyTextureRegion(texture, bufferResource, info.bufferSize,
+ offsetBytes, bufferBytesPerRow, aspect);
+ const D3D12_BOX sourceRegion =
+ ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
+
+ commandList->CopyTextureRegion(&textureLocation, info.textureOffset.x,
+ info.textureOffset.y, info.textureOffset.z,
+ &bufferLocation, &sourceRegion);
+ }
+ }
+
+ void CopyBufferToTextureWithCopySplit(CommandRecordingContext* commandContext,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize,
+ Texture* texture,
+ ID3D12Resource* bufferResource,
+ const uint64_t offsetBytes,
+ const uint32_t bytesPerRow,
+ const uint32_t rowsPerImage,
+ Aspect aspect) {
+ ASSERT(HasOneBit(aspect));
+ // See comments in ComputeTextureCopySplits() for more details.
+ const TexelBlockInfo& blockInfo = texture->GetFormat().GetTexelBlockInfo(aspect);
+ const TextureCopySplits copySplits = ComputeTextureCopySplits(
+ textureCopy.origin, copySize, blockInfo, offsetBytes, bytesPerRow, rowsPerImage);
+
+ const uint64_t bytesPerSlice = bytesPerRow * (rowsPerImage / blockInfo.blockHeight);
+
+        // copySplits.copies2D[1] is always calculated for the second copy slice, with an
+        // extra "bytesPerSlice" offset relative to the first copy slice. So here we use
+        // the array bufferOffsetsForNextSlice to record the extra offset for each copy
+        // slice: bufferOffsetsForNextSlice[0] is the extra offset for the next copy
+        // slice that uses copySplits.copies2D[0], and bufferOffsetsForNextSlice[1] is
+        // the extra offset for the next copy slice that uses copySplits.copies2D[1].
+ std::array<uint64_t, TextureCopySplits::kMaxTextureCopySplits> bufferOffsetsForNextSlice = {
+ {0u, 0u}};
+
+ for (uint32_t copySlice = 0; copySlice < copySize.depth; ++copySlice) {
+ const uint32_t splitIndex = copySlice % copySplits.copies2D.size();
+
+ const Texture2DCopySplit& copySplitPerLayerBase = copySplits.copies2D[splitIndex];
+ const uint64_t bufferOffsetForNextSlice = bufferOffsetsForNextSlice[splitIndex];
+ const uint32_t copyTextureLayer = copySlice + textureCopy.origin.z;
+
+ RecordCopyBufferToTextureFromTextureCopySplit(
+ commandContext->GetCommandList(), copySplitPerLayerBase, bufferResource,
+ bufferOffsetForNextSlice, bytesPerRow, texture, textureCopy.mipLevel,
+ copyTextureLayer, aspect);
+
+ bufferOffsetsForNextSlice[splitIndex] += bytesPerSlice * copySplits.copies2D.size();
+ }
+ }
+
}} // namespace dawn_native::d3d12
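
CopyBufferToTextureWithCopySplit above alternates between at most two precomputed 2D copy splits, advancing a separate running offset for each. A simplified sketch of that bookkeeping, with illustrative constants standing in for the real split data:

    #include <array>
    #include <cstdint>
    #include <cstdio>

    int main() {
        // Two 2D splits; slices alternate between them, and each split's extra
        // offset grows by bytesPerSlice * splitCount every time it is reused.
        constexpr uint32_t kSplitCount = 2;
        constexpr uint64_t kBytesPerSlice = 4096;  // illustrative
        std::array<uint64_t, kSplitCount> extraOffsets = {0, 0};

        for (uint32_t slice = 0; slice < 5; ++slice) {
            const uint32_t split = slice % kSplitCount;
            std::printf("slice %u uses split %u at extra offset %llu\n", slice, split,
                        static_cast<unsigned long long>(extraOffsets[split]));
            extraOffsets[split] += kBytesPerSlice * kSplitCount;
        }
        return 0;
    }
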
diff --git a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
index d1559e72a14..6109c0f60b3 100644
--- a/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
+++ b/chromium/third_party/dawn/src/dawn_native/d3d12/UtilsD3D12.h
@@ -15,6 +15,7 @@
#ifndef DAWNNATIVE_D3D12_UTILSD3D12_H_
#define DAWNNATIVE_D3D12_UTILSD3D12_H_
+#include "dawn_native/Commands.h"
#include "dawn_native/d3d12/BufferD3D12.h"
#include "dawn_native/d3d12/TextureCopySplitter.h"
#include "dawn_native/d3d12/TextureD3D12.h"
@@ -29,16 +30,40 @@ namespace dawn_native { namespace d3d12 {
D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
uint32_t level,
- uint32_t slice);
+ uint32_t slice,
+ Aspect aspect);
D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
const Texture* texture,
ID3D12Resource* bufferResource,
const Extent3D& bufferSize,
const uint64_t offset,
- const uint32_t rowPitch);
+ const uint32_t rowPitch,
+ Aspect aspect);
D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize);
+ bool IsTypeless(DXGI_FORMAT format);
+
+ void RecordCopyBufferToTextureFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
+ const Texture2DCopySplit& baseCopySplit,
+ Buffer* buffer,
+ uint64_t baseOffset,
+ uint64_t bufferBytesPerRow,
+ Texture* texture,
+ uint32_t textureMiplevel,
+ uint32_t textureSlice,
+ Aspect aspect);
+
+ void CopyBufferToTextureWithCopySplit(CommandRecordingContext* commandContext,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize,
+ Texture* texture,
+ ID3D12Resource* bufferResource,
+ const uint64_t offset,
+ const uint32_t bytesPerRow,
+ const uint32_t rowsPerImage,
+ Aspect aspect);
+
}} // namespace dawn_native::d3d12
#endif // DAWNNATIVE_D3D12_UTILSD3D12_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.h
index 98bab96244c..ad6ed5faac9 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.h
@@ -22,28 +22,39 @@
namespace dawn_native { namespace metal {
+ class CommandRecordingContext;
class Device;
class Buffer : public BufferBase {
public:
- static ResultOrError<Buffer*> Create(Device* device, const BufferDescriptor* descriptor);
+ static ResultOrError<Ref<Buffer>> Create(Device* device,
+ const BufferDescriptor* descriptor);
id<MTLBuffer> GetMTLBuffer() const;
+ void EnsureDataInitialized(CommandRecordingContext* commandContext);
+ void EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ uint64_t offset,
+ uint64_t size);
+ void EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ const CopyTextureToBufferCmd* copy);
+
private:
using BufferBase::BufferBase;
MaybeError Initialize();
~Buffer() override;
// Dawn API
- MaybeError MapReadAsyncImpl(uint32_t serial) override;
- MaybeError MapWriteAsyncImpl(uint32_t serial) override;
+ MaybeError MapReadAsyncImpl() override;
+ MaybeError MapWriteAsyncImpl() override;
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
void UnmapImpl() override;
void DestroyImpl() override;
void* GetMappedPointerImpl() override;
- bool IsMapWritable() const override;
- MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
+ bool IsMappableAtCreation() const override;
+ MaybeError MapAtCreationImpl() override;
- void ClearBuffer(BufferBase::ClearValue clearValue);
+ void InitializeToZero(CommandRecordingContext* commandContext);
+ void ClearBuffer(CommandRecordingContext* commandContext, uint8_t clearValue);
id<MTLBuffer> mMtlBuffer = nil;
};
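
The EnsureDataInitializedAsDestination overloads declared above implement a lazy-initialization policy: a write that covers the whole buffer marks it initialized with no clear, while a partial write zero-clears first. A minimal sketch of that policy; the Buffer type here is a stand-in, not Dawn's:

    #include <cstdint>

    struct Buffer {
        uint64_t size = 0;
        bool dataInitialized = false;

        void EnsureDataInitializedAsDestination(uint64_t offset, uint64_t writeSize) {
            if (dataInitialized) {
                return;
            }
            if (offset == 0 && writeSize == size) {
                // Fully overwritten: nothing stale can leak, so skip the clear.
                dataInitialized = true;
            } else {
                // A real implementation would zero the buffer here
                // (InitializeToZero) before marking it initialized.
                dataInitialized = true;
            }
        }
    };
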
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm
index ccfd3b39ecb..2c82a7cf721 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/BufferMTL.mm
@@ -15,6 +15,8 @@
#include "dawn_native/metal/BufferMTL.h"
#include "common/Math.h"
+#include "dawn_native/CommandBuffer.h"
+#include "dawn_native/metal/CommandRecordingContext.h"
#include "dawn_native/metal/DeviceMTL.h"
#include <limits>
@@ -29,10 +31,10 @@ namespace dawn_native { namespace metal {
static constexpr uint32_t kMaxBufferSizeFallback = 1024u * 1024u * 1024u;
// static
- ResultOrError<Buffer*> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+ ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
DAWN_TRY(buffer->Initialize());
- return buffer.Detach();
+ return std::move(buffer);
}
MaybeError Buffer::Initialize() {
@@ -67,6 +69,7 @@ namespace dawn_native { namespace metal {
if (currentSize > maxBufferSize) {
return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
}
+#if defined(DAWN_PLATFORM_MACOS)
} else if (@available(macOS 10.12, *)) {
// |maxBufferLength| isn't always available on older systems. If available, use
// |recommendedMaxWorkingSetSize| instead. We can probably allocate more than this,
@@ -76,6 +79,7 @@ namespace dawn_native { namespace metal {
if (currentSize > maxWorkingSetSize) {
return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
}
+#endif
} else if (currentSize > kMaxBufferSizeFallback) {
return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
}
@@ -87,7 +91,9 @@ namespace dawn_native { namespace metal {
}
if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- ClearBuffer(BufferBase::ClearValue::NonZero);
+ CommandRecordingContext* commandContext =
+ ToBackend(GetDevice())->GetPendingCommandContext();
+ ClearBuffer(commandContext, uint8_t(1u));
}
return {};
@@ -101,26 +107,37 @@ namespace dawn_native { namespace metal {
return mMtlBuffer;
}
- bool Buffer::IsMapWritable() const {
+ bool Buffer::IsMappableAtCreation() const {
// TODO(enga): Handle CPU-visible memory on UMA
return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
}
- MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
- *mappedPointer = reinterpret_cast<uint8_t*>([mMtlBuffer contents]);
+ MaybeError Buffer::MapAtCreationImpl() {
+ CommandRecordingContext* commandContext =
+ ToBackend(GetDevice())->GetPendingCommandContext();
+ EnsureDataInitialized(commandContext);
+
+ return {};
+ }
+
+ MaybeError Buffer::MapReadAsyncImpl() {
return {};
}
- MaybeError Buffer::MapReadAsyncImpl(uint32_t serial) {
+ MaybeError Buffer::MapWriteAsyncImpl() {
return {};
}
- MaybeError Buffer::MapWriteAsyncImpl(uint32_t serial) {
+ MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ CommandRecordingContext* commandContext =
+ ToBackend(GetDevice())->GetPendingCommandContext();
+ EnsureDataInitialized(commandContext);
+
return {};
}
void* Buffer::GetMappedPointerImpl() {
- return reinterpret_cast<uint8_t*>([mMtlBuffer contents]);
+ return [mMtlBuffer contents];
}
void Buffer::UnmapImpl() {
@@ -132,16 +149,65 @@ namespace dawn_native { namespace metal {
mMtlBuffer = nil;
}
- void Buffer::ClearBuffer(BufferBase::ClearValue clearValue) {
- // TODO(jiawei.shao@intel.com): support buffer lazy-initialization to 0.
- ASSERT(clearValue == BufferBase::ClearValue::NonZero);
- const uint8_t clearBufferValue = 1;
+ void Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
+ // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+ // instead when buffer lazy initialization is completely supported.
+ if (IsDataInitialized() ||
+ !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+ return;
+ }
+
+ InitializeToZero(commandContext);
+ }
+
+ void Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ uint64_t offset,
+ uint64_t size) {
+ // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+ // instead when buffer lazy initialization is completely supported.
+ if (IsDataInitialized() ||
+ !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+ return;
+ }
+
+ if (IsFullBufferRange(offset, size)) {
+ SetIsDataInitialized();
+ } else {
+ InitializeToZero(commandContext);
+ }
+ }
+
+ void Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+ const CopyTextureToBufferCmd* copy) {
+ // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+ // instead when buffer lazy initialization is completely supported.
+ if (IsDataInitialized() ||
+ !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+ return;
+ }
+
+ if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+ SetIsDataInitialized();
+ } else {
+ InitializeToZero(commandContext);
+ }
+ }
+
+ void Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
+ ASSERT(GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse));
+ ASSERT(!IsDataInitialized());
+
+ ClearBuffer(commandContext, uint8_t(0u));
+
+ SetIsDataInitialized();
+ GetDevice()->IncrementLazyClearCountForTesting();
+ }
- Device* device = ToBackend(GetDevice());
- CommandRecordingContext* commandContext = device->GetPendingCommandContext();
+ void Buffer::ClearBuffer(CommandRecordingContext* commandContext, uint8_t clearValue) {
+ ASSERT(commandContext != nullptr);
[commandContext->EnsureBlit() fillBuffer:mMtlBuffer
range:NSMakeRange(0, GetSize())
- value:clearBufferValue];
+ value:clearValue];
}
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
index e8c81b7dc28..3410e57954d 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.h
@@ -15,8 +15,8 @@
#ifndef DAWNNATIVE_METAL_COMMANDBUFFERMTL_H_
#define DAWNNATIVE_METAL_COMMANDBUFFERMTL_H_
-#include "dawn_native/CommandAllocator.h"
#include "dawn_native/CommandBuffer.h"
+#include "dawn_native/Error.h"
#import <Metal/Metal.h>
@@ -33,22 +33,19 @@ namespace dawn_native { namespace metal {
public:
CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
- void FillCommands(CommandRecordingContext* commandContext);
+ MaybeError FillCommands(CommandRecordingContext* commandContext);
private:
- ~CommandBuffer() override;
- void EncodeComputePass(CommandRecordingContext* commandContext);
- void EncodeRenderPass(CommandRecordingContext* commandContext,
- MTLRenderPassDescriptor* mtlRenderPass,
- uint32_t width,
- uint32_t height);
-
- void EncodeRenderPassInternal(CommandRecordingContext* commandContext,
- MTLRenderPassDescriptor* mtlRenderPass,
- uint32_t width,
- uint32_t height);
-
- CommandIterator mCommands;
+ MaybeError EncodeComputePass(CommandRecordingContext* commandContext);
+ MaybeError EncodeRenderPass(CommandRecordingContext* commandContext,
+ MTLRenderPassDescriptor* mtlRenderPass,
+ uint32_t width,
+ uint32_t height);
+
+ MaybeError EncodeRenderPassInternal(CommandRecordingContext* commandContext,
+ MTLRenderPassDescriptor* mtlRenderPass,
+ uint32_t width,
+ uint32_t height);
};
}} // namespace dawn_native::metal
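
The signature changes above turn the encoder entry points from void into MaybeError so unimplemented commands can fail cleanly instead of crashing. A schematic of the MaybeError / DAWN_TRY early-return pattern, with simplified stand-ins for Dawn's error machinery:

    #include <memory>
    #include <string>

    // Simplified stand-ins: Dawn's real MaybeError / DAWN_TRY are richer.
    using MaybeError = std::unique_ptr<std::string>;  // null means success

    #define TRY(expr)                    \
        do {                             \
            MaybeError _err = (expr);    \
            if (_err != nullptr) {       \
                return _err;             \
            }                            \
        } while (0)

    MaybeError EncodePass(bool supported) {
        if (!supported) {
            return std::make_unique<std::string>("Waiting for implementation.");
        }
        return nullptr;
    }

    MaybeError FillCommands() {
        TRY(EncodePass(true));  // propagates an error upward automatically
        TRY(EncodePass(true));
        return nullptr;
    }
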
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
index 64c098dce51..351090aaec4 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/CommandBufferMTL.mm
@@ -26,6 +26,7 @@
#include "dawn_native/metal/RenderPipelineMTL.h"
#include "dawn_native/metal/SamplerMTL.h"
#include "dawn_native/metal/TextureMTL.h"
+#include "dawn_native/metal/UtilsMetal.h"
namespace dawn_native { namespace metal {
@@ -262,7 +263,9 @@ namespace dawn_native { namespace metal {
// MSL code generated by SPIRV-Cross expects.
PerStage<std::array<uint32_t, kGenericMetalBufferSlots>> data;
- void Apply(id<MTLRenderCommandEncoder> render, RenderPipeline* pipeline) {
+ void Apply(id<MTLRenderCommandEncoder> render,
+ RenderPipeline* pipeline,
+ bool enableVertexPulling) {
wgpu::ShaderStage stagesToApply =
dirtyStages & pipeline->GetStagesRequiringStorageBufferLength();
@@ -273,6 +276,11 @@ namespace dawn_native { namespace metal {
if (stagesToApply & wgpu::ShaderStage::Vertex) {
uint32_t bufferCount = ToBackend(pipeline->GetLayout())
->GetBufferBindingCount(SingleShaderStage::Vertex);
+
+ if (enableVertexPulling) {
+ bufferCount += pipeline->GetVertexStateDescriptor()->vertexBufferCount;
+ }
+
[render setVertexBytes:data[SingleShaderStage::Vertex].data()
length:sizeof(uint32_t) * bufferCount
atIndex:kBufferLengthBufferSlot];
@@ -309,159 +317,6 @@ namespace dawn_native { namespace metal {
}
};
- struct TextureBufferCopySplit {
- static constexpr uint32_t kMaxTextureBufferCopyRegions = 3;
-
- struct CopyInfo {
- NSUInteger bufferOffset;
- NSUInteger bytesPerRow;
- NSUInteger bytesPerImage;
- Origin3D textureOrigin;
- Extent3D copyExtent;
- };
-
- uint32_t count = 0;
- std::array<CopyInfo, kMaxTextureBufferCopyRegions> copies;
- };
-
- MTLOrigin MakeMTLOrigin(Origin3D origin) {
- return MTLOriginMake(origin.x, origin.y, origin.z);
- }
-
- TextureBufferCopySplit ComputeTextureBufferCopySplit(wgpu::TextureDimension dimension,
- Origin3D origin,
- Extent3D copyExtent,
- Format textureFormat,
- Extent3D virtualSizeAtLevel,
- uint64_t bufferSize,
- uint64_t bufferOffset,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage) {
- TextureBufferCopySplit copy;
-
- // When copying textures from/to an unpacked buffer, the Metal validation layer doesn't
- // compute the correct range when checking if the buffer is big enough to contain the
- // data for the whole copy. Instead of looking at the position of the last texel in the
- // buffer, it computes the volume of the 3D box with bytesPerRow * (rowsPerImage /
- // format.blockHeight) * copySize.depth. For example considering the pixel buffer below
-        // format.blockHeight) * copySize.depth. For example, consider the pixel buffer
-        // below, where in memory each row of texture data (D) is followed by some
-        // padding data (P):
- // |DDDDDDD|PP|
- // |DDDDDDD|PP|
- // |DDDDDDD|PP|
- // |DDDDDDA|PP|
- // The last pixel read will be A, but the driver will think it is the whole last padding
- // row, causing it to generate an error when the pixel buffer is just big enough.
-
-        // We work around this limitation by detecting when Metal would complain and
-        // copying the last image and row separately using a tight sourceBytesPerRow or
-        // sourceBytesPerImage.
- uint32_t dataRowsPerImage = rowsPerImage / textureFormat.blockHeight;
- uint32_t bytesPerImage = bytesPerRow * dataRowsPerImage;
-
- // Metal validation layer requires that if the texture's pixel format is a compressed
- // format, the sourceSize must be a multiple of the pixel format's block size or be
- // clamped to the edge of the texture if the block extends outside the bounds of a
- // texture.
- uint32_t clampedCopyExtentWidth =
- (origin.x + copyExtent.width > virtualSizeAtLevel.width)
- ? (virtualSizeAtLevel.width - origin.x)
- : copyExtent.width;
- uint32_t clampedCopyExtentHeight =
- (origin.y + copyExtent.height > virtualSizeAtLevel.height)
- ? (virtualSizeAtLevel.height - origin.y)
- : copyExtent.height;
-
- ASSERT(dimension == wgpu::TextureDimension::e2D);
-
- // Check whether buffer size is big enough.
- bool needWorkaround = bufferSize - bufferOffset < bytesPerImage * copyExtent.depth;
- if (!needWorkaround) {
- copy.count = 1;
- copy.copies[0].bufferOffset = bufferOffset;
- copy.copies[0].bytesPerRow = bytesPerRow;
- copy.copies[0].bytesPerImage = bytesPerImage;
- copy.copies[0].textureOrigin = origin;
- copy.copies[0].copyExtent = {clampedCopyExtentWidth, clampedCopyExtentHeight,
- copyExtent.depth};
- return copy;
- }
-
- uint64_t currentOffset = bufferOffset;
-
-            // Do the copy for everything except the last image.
- if (copyExtent.depth > 1) {
- copy.copies[copy.count].bufferOffset = currentOffset;
- copy.copies[copy.count].bytesPerRow = bytesPerRow;
- copy.copies[copy.count].bytesPerImage = bytesPerImage;
- copy.copies[copy.count].textureOrigin = origin;
- copy.copies[copy.count].copyExtent = {
- clampedCopyExtentWidth, clampedCopyExtentHeight, copyExtent.depth - 1};
-
- ++copy.count;
-
- // Update offset to copy to the last image.
- currentOffset += (copyExtent.depth - 1) * bytesPerImage;
- }
-
-            // Do the copy in the last image for everything except the last row.
- uint32_t copyBlockRowCount = copyExtent.height / textureFormat.blockHeight;
- if (copyBlockRowCount > 1) {
- copy.copies[copy.count].bufferOffset = currentOffset;
- copy.copies[copy.count].bytesPerRow = bytesPerRow;
- copy.copies[copy.count].bytesPerImage = bytesPerRow * (copyBlockRowCount - 1);
- copy.copies[copy.count].textureOrigin = {origin.x, origin.y,
- origin.z + copyExtent.depth - 1};
-
- ASSERT(copyExtent.height - textureFormat.blockHeight < virtualSizeAtLevel.height);
- copy.copies[copy.count].copyExtent = {
- clampedCopyExtentWidth, copyExtent.height - textureFormat.blockHeight, 1};
-
- ++copy.count;
-
- // Update offset to copy to the last row.
- currentOffset += (copyBlockRowCount - 1) * bytesPerRow;
- }
-
-            // Copy the last row with the exact number of bytes in that row.
-            // Work around this issue just as for a copy to a 1D texture.
- uint32_t lastRowDataSize =
- (copyExtent.width / textureFormat.blockWidth) * textureFormat.blockByteSize;
- uint32_t lastRowCopyExtentHeight =
- textureFormat.blockHeight + clampedCopyExtentHeight - copyExtent.height;
- ASSERT(lastRowCopyExtentHeight <= textureFormat.blockHeight);
-
- copy.copies[copy.count].bufferOffset = currentOffset;
- copy.copies[copy.count].bytesPerRow = lastRowDataSize;
- copy.copies[copy.count].bytesPerImage = lastRowDataSize;
- copy.copies[copy.count].textureOrigin = {
- origin.x, origin.y + copyExtent.height - textureFormat.blockHeight,
- origin.z + copyExtent.depth - 1};
- copy.copies[copy.count].copyExtent = {clampedCopyExtentWidth, lastRowCopyExtentHeight,
- 1};
- ++copy.count;
-
- return copy;
- }
-
- void EnsureSourceTextureInitialized(Texture* texture,
- const Extent3D& size,
- const TextureCopy& src) {
- texture->EnsureSubresourceContentInitialized(
- {src.mipLevel, 1, src.arrayLayer, size.depth});
- }
-
- void EnsureDestinationTextureInitialized(Texture* texture,
- const Extent3D& size,
- const TextureCopy& dst) {
- SubresourceRange range = {dst.mipLevel, 1, dst.arrayLayer, size.depth};
- if (IsCompleteSubresourceCopiedTo(texture, size, dst.mipLevel)) {
- texture->SetIsSubresourceContentInitialized(true, range);
- } else {
- texture->EnsureSubresourceContentInitialized(range);
- }
- }
-
// Keeps track of the dirty bind groups so they can be lazily applied when we know the
// pipeline state.
// Bind groups may be inherited because bind groups are packed in the buffer /
@@ -635,10 +490,17 @@ namespace dawn_native { namespace metal {
// all the relevant state.
class VertexBufferTracker {
public:
+ explicit VertexBufferTracker(StorageBufferLengthTracker* lengthTracker)
+ : mLengthTracker(lengthTracker) {
+ }
+
void OnSetVertexBuffer(uint32_t slot, Buffer* buffer, uint64_t offset) {
mVertexBuffers[slot] = buffer->GetMTLBuffer();
mVertexBufferOffsets[slot] = offset;
+ ASSERT(buffer->GetSize() < std::numeric_limits<uint32_t>::max());
+ mVertexBufferBindingSizes[slot] = static_cast<uint32_t>(buffer->GetSize() - offset);
+
            // Use 64-bit masks and make sure there is no shift UB
static_assert(kMaxVertexBuffers <= 8 * sizeof(unsigned long long) - 1, "");
mDirtyVertexBuffers |= 1ull << slot;
@@ -651,13 +513,22 @@ namespace dawn_native { namespace metal {
mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
}
- void Apply(id<MTLRenderCommandEncoder> encoder, RenderPipeline* pipeline) {
+ void Apply(id<MTLRenderCommandEncoder> encoder,
+ RenderPipeline* pipeline,
+ bool enableVertexPulling) {
std::bitset<kMaxVertexBuffers> vertexBuffersToApply =
mDirtyVertexBuffers & pipeline->GetVertexBufferSlotsUsed();
for (uint32_t dawnIndex : IterateBitSet(vertexBuffersToApply)) {
uint32_t metalIndex = pipeline->GetMtlVertexBufferIndex(dawnIndex);
+ if (enableVertexPulling) {
+ // Insert lengths for vertex buffers bound as storage buffers
+ mLengthTracker->data[SingleShaderStage::Vertex][metalIndex] =
+ mVertexBufferBindingSizes[dawnIndex];
+ mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
+ }
+
[encoder setVertexBuffers:&mVertexBuffers[dawnIndex]
offsets:&mVertexBufferOffsets[dawnIndex]
withRange:NSMakeRange(metalIndex, 1)];
@@ -671,23 +542,23 @@ namespace dawn_native { namespace metal {
std::bitset<kMaxVertexBuffers> mDirtyVertexBuffers;
std::array<id<MTLBuffer>, kMaxVertexBuffers> mVertexBuffers;
std::array<NSUInteger, kMaxVertexBuffers> mVertexBufferOffsets;
+ std::array<uint32_t, kMaxVertexBuffers> mVertexBufferBindingSizes;
+
+ StorageBufferLengthTracker* mLengthTracker;
};
} // anonymous namespace
CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
- : CommandBufferBase(encoder, descriptor), mCommands(encoder->AcquireCommands()) {
- }
-
- CommandBuffer::~CommandBuffer() {
- FreeCommands(&mCommands);
+ : CommandBufferBase(encoder, descriptor) {
}
- void CommandBuffer::FillCommands(CommandRecordingContext* commandContext) {
+ MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext) {
const std::vector<PassResourceUsage>& passResourceUsages = GetResourceUsages().perPass;
size_t nextPassNumber = 0;
- auto LazyClearForPass = [](const PassResourceUsage& usages) {
+ auto LazyClearForPass = [](const PassResourceUsage& usages,
+ CommandRecordingContext* commandContext) {
for (size_t i = 0; i < usages.textures.size(); ++i) {
Texture* texture = ToBackend(usages.textures[i]);
// Clear textures that are not output attachments. Output attachments will be
@@ -697,6 +568,9 @@ namespace dawn_native { namespace metal {
texture->EnsureSubresourceContentInitialized(texture->GetAllSubresources());
}
}
+ for (BufferBase* bufferBase : usages.buffers) {
+ ToBackend(bufferBase)->EnsureDataInitialized(commandContext);
+ }
};
Command type;
@@ -705,10 +579,10 @@ namespace dawn_native { namespace metal {
case Command::BeginComputePass: {
mCommands.NextCommand<BeginComputePassCmd>();
- LazyClearForPass(passResourceUsages[nextPassNumber]);
+ LazyClearForPass(passResourceUsages[nextPassNumber], commandContext);
commandContext->EndBlit();
- EncodeComputePass(commandContext);
+ DAWN_TRY(EncodeComputePass(commandContext));
nextPassNumber++;
break;
@@ -717,12 +591,12 @@ namespace dawn_native { namespace metal {
case Command::BeginRenderPass: {
BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
- LazyClearForPass(passResourceUsages[nextPassNumber]);
+ LazyClearForPass(passResourceUsages[nextPassNumber], commandContext);
commandContext->EndBlit();
LazyClearRenderPassAttachments(cmd);
MTLRenderPassDescriptor* descriptor = CreateMTLRenderPassDescriptor(cmd);
- EncodeRenderPass(commandContext, descriptor, cmd->width, cmd->height);
+ DAWN_TRY(EncodeRenderPass(commandContext, descriptor, cmd->width, cmd->height));
nextPassNumber++;
break;
@@ -731,6 +605,11 @@ namespace dawn_native { namespace metal {
case Command::CopyBufferToBuffer: {
CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+ ToBackend(copy->source)->EnsureDataInitialized(commandContext);
+ ToBackend(copy->destination)
+ ->EnsureDataInitializedAsDestination(commandContext,
+ copy->destinationOffset, copy->size);
+
[commandContext->EnsureBlit()
copyFromBuffer:ToBackend(copy->source)->GetMTLBuffer()
sourceOffset:copy->sourceOffset
@@ -748,17 +627,12 @@ namespace dawn_native { namespace metal {
Buffer* buffer = ToBackend(src.buffer.Get());
Texture* texture = ToBackend(dst.texture.Get());
- EnsureDestinationTextureInitialized(texture, copy->copySize, copy->destination);
-
- const Extent3D virtualSizeAtLevel =
- texture->GetMipLevelVirtualSize(dst.mipLevel);
+ buffer->EnsureDataInitialized(commandContext);
+ EnsureDestinationTextureInitialized(texture, copy->destination, copy->copySize);
- Origin3D copyOrigin = dst.origin;
- copyOrigin.z = dst.arrayLayer;
TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
- texture->GetDimension(), copyOrigin, copySize, texture->GetFormat(),
- virtualSizeAtLevel, buffer->GetSize(), src.offset, src.bytesPerRow,
- src.rowsPerImage);
+ texture, dst.mipLevel, dst.origin, copySize, buffer->GetSize(), src.offset,
+ src.bytesPerRow, src.rowsPerImage);
for (uint32_t i = 0; i < splitCopies.count; ++i) {
const TextureBufferCopySplit::CopyInfo& copyInfo = splitCopies.copies[i];
@@ -770,6 +644,9 @@ namespace dawn_native { namespace metal {
const MTLSize copyExtent =
MTLSizeMake(copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
+ MTLBlitOption blitOption =
+ ComputeMTLBlitOption(texture->GetFormat(), dst.aspect);
+
uint64_t bufferOffset = copyInfo.bufferOffset;
for (uint32_t copyLayer = copyBaseLayer;
copyLayer < copyBaseLayer + copyLayerCount; ++copyLayer) {
@@ -781,7 +658,8 @@ namespace dawn_native { namespace metal {
toTexture:texture->GetMTLTexture()
destinationSlice:copyLayer
destinationLevel:dst.mipLevel
- destinationOrigin:textureOrigin];
+ destinationOrigin:textureOrigin
+ options:blitOption];
bufferOffset += copyInfo.bytesPerImage;
}
}
@@ -797,15 +675,14 @@ namespace dawn_native { namespace metal {
Texture* texture = ToBackend(src.texture.Get());
Buffer* buffer = ToBackend(dst.buffer.Get());
- EnsureSourceTextureInitialized(texture, copy->copySize, copy->source);
+ buffer->EnsureDataInitializedAsDestination(commandContext, copy);
+
+ texture->EnsureSubresourceContentInitialized(
+ GetSubresourcesAffectedByCopy(src, copySize));
- Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(src.mipLevel);
- Origin3D copyOrigin = src.origin;
- copyOrigin.z = src.arrayLayer;
TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
- texture->GetDimension(), copyOrigin, copySize, texture->GetFormat(),
- virtualSizeAtLevel, buffer->GetSize(), dst.offset, dst.bytesPerRow,
- dst.rowsPerImage);
+ texture, src.mipLevel, src.origin, copySize, buffer->GetSize(), dst.offset,
+ dst.bytesPerRow, dst.rowsPerImage);
for (uint32_t i = 0; i < splitCopies.count; ++i) {
const TextureBufferCopySplit::CopyInfo& copyInfo = splitCopies.copies[i];
@@ -817,6 +694,9 @@ namespace dawn_native { namespace metal {
const MTLSize copyExtent =
MTLSizeMake(copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
+ MTLBlitOption blitOption =
+ ComputeMTLBlitOption(texture->GetFormat(), src.aspect);
+
uint64_t bufferOffset = copyInfo.bufferOffset;
for (uint32_t copyLayer = copyBaseLayer;
copyLayer < copyBaseLayer + copyLayerCount; ++copyLayer) {
@@ -828,7 +708,8 @@ namespace dawn_native { namespace metal {
toBuffer:buffer->GetMTLBuffer()
destinationOffset:bufferOffset
destinationBytesPerRow:copyInfo.bytesPerRow
- destinationBytesPerImage:copyInfo.bytesPerImage];
+ destinationBytesPerImage:copyInfo.bytesPerImage
+ options:blitOption];
bufferOffset += copyInfo.bytesPerImage;
}
}
@@ -842,31 +723,44 @@ namespace dawn_native { namespace metal {
Texture* srcTexture = ToBackend(copy->source.texture.Get());
Texture* dstTexture = ToBackend(copy->destination.texture.Get());
- EnsureSourceTextureInitialized(srcTexture, copy->copySize, copy->source);
- EnsureDestinationTextureInitialized(dstTexture, copy->copySize,
- copy->destination);
+ srcTexture->EnsureSubresourceContentInitialized(
+ GetSubresourcesAffectedByCopy(copy->source, copy->copySize));
+ EnsureDestinationTextureInitialized(dstTexture, copy->destination,
+ copy->copySize);
// TODO(jiawei.shao@intel.com): support copies with 1D and 3D textures.
ASSERT(srcTexture->GetDimension() == wgpu::TextureDimension::e2D &&
dstTexture->GetDimension() == wgpu::TextureDimension::e2D);
- const MTLSize mtlSizeOneLayer =
+ const MTLSize sizeOneLayer =
MTLSizeMake(copy->copySize.width, copy->copySize.height, 1);
+ const MTLOrigin sourceOriginNoLayer =
+ MTLOriginMake(copy->source.origin.x, copy->source.origin.y, 0);
+ const MTLOrigin destinationOriginNoLayer =
+ MTLOriginMake(copy->destination.origin.x, copy->destination.origin.y, 0);
+
for (uint32_t slice = 0; slice < copy->copySize.depth; ++slice) {
[commandContext->EnsureBlit()
copyFromTexture:srcTexture->GetMTLTexture()
- sourceSlice:copy->source.arrayLayer + slice
+ sourceSlice:copy->source.origin.z + slice
sourceLevel:copy->source.mipLevel
- sourceOrigin:MakeMTLOrigin(copy->source.origin)
- sourceSize:mtlSizeOneLayer
+ sourceOrigin:sourceOriginNoLayer
+ sourceSize:sizeOneLayer
toTexture:dstTexture->GetMTLTexture()
- destinationSlice:copy->destination.arrayLayer + slice
+ destinationSlice:copy->destination.origin.z + slice
destinationLevel:copy->destination.mipLevel
- destinationOrigin:MakeMTLOrigin(copy->destination.origin)];
+ destinationOrigin:destinationOriginNoLayer];
}
-
break;
}
+ case Command::ResolveQuerySet: {
+ return DAWN_UNIMPLEMENTED_ERROR("Waiting for implementation.");
+ }
+
+ case Command::WriteTimestamp: {
+ return DAWN_UNIMPLEMENTED_ERROR("Waiting for implementation.");
+ }
+
default: {
UNREACHABLE();
break;
@@ -875,9 +769,10 @@ namespace dawn_native { namespace metal {
}
commandContext->EndBlit();
+ return {};
}
- void CommandBuffer::EncodeComputePass(CommandRecordingContext* commandContext) {
+ MaybeError CommandBuffer::EncodeComputePass(CommandRecordingContext* commandContext) {
ComputePipeline* lastPipeline = nullptr;
StorageBufferLengthTracker storageBufferLengths = {};
BindGroupTracker bindGroups(&storageBufferLengths);
@@ -890,7 +785,7 @@ namespace dawn_native { namespace metal {
case Command::EndComputePass: {
mCommands.NextCommand<EndComputePassCmd>();
commandContext->EndCompute();
- return;
+ return {};
}
case Command::Dispatch: {
@@ -968,6 +863,10 @@ namespace dawn_native { namespace metal {
break;
}
+ case Command::WriteTimestamp: {
+ return DAWN_UNIMPLEMENTED_ERROR("Waiting for implementation.");
+ }
+
default: {
UNREACHABLE();
break;
@@ -979,10 +878,10 @@ namespace dawn_native { namespace metal {
UNREACHABLE();
}
- void CommandBuffer::EncodeRenderPass(CommandRecordingContext* commandContext,
- MTLRenderPassDescriptor* mtlRenderPass,
- uint32_t width,
- uint32_t height) {
+ MaybeError CommandBuffer::EncodeRenderPass(CommandRecordingContext* commandContext,
+ MTLRenderPassDescriptor* mtlRenderPass,
+ uint32_t width,
+ uint32_t height) {
ASSERT(mtlRenderPass);
Device* device = ToBackend(GetDevice());
@@ -1026,7 +925,7 @@ namespace dawn_native { namespace metal {
// If we need to use a temporary resolve texture we need to copy the result of MSAA
// resolve back to the true resolve targets.
if (useTemporaryResolveTexture) {
- EncodeRenderPass(commandContext, mtlRenderPass, width, height);
+ DAWN_TRY(EncodeRenderPass(commandContext, mtlRenderPass, width, height));
for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
if (trueResolveTextures[i] == nil) {
continue;
@@ -1039,7 +938,7 @@ namespace dawn_native { namespace metal {
[temporaryResolveTextures[i] release];
temporaryResolveTextures[i] = nil;
}
- return;
+ return {};
}
}
@@ -1062,24 +961,26 @@ namespace dawn_native { namespace metal {
// If we found a store + MSAA resolve we need to resolve in a different render pass.
if (hasStoreAndMSAAResolve) {
- EncodeRenderPass(commandContext, mtlRenderPass, width, height);
+ DAWN_TRY(EncodeRenderPass(commandContext, mtlRenderPass, width, height));
ResolveInAnotherRenderPass(commandContext, mtlRenderPass, resolveTextures);
- return;
+ return {};
}
}
- EncodeRenderPassInternal(commandContext, mtlRenderPass, width, height);
+ DAWN_TRY(EncodeRenderPassInternal(commandContext, mtlRenderPass, width, height));
+ return {};
}
- void CommandBuffer::EncodeRenderPassInternal(CommandRecordingContext* commandContext,
- MTLRenderPassDescriptor* mtlRenderPass,
- uint32_t width,
- uint32_t height) {
+ MaybeError CommandBuffer::EncodeRenderPassInternal(CommandRecordingContext* commandContext,
+ MTLRenderPassDescriptor* mtlRenderPass,
+ uint32_t width,
+ uint32_t height) {
+ bool enableVertexPulling = GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling);
RenderPipeline* lastPipeline = nullptr;
id<MTLBuffer> indexBuffer = nil;
uint32_t indexBufferBaseOffset = 0;
- VertexBufferTracker vertexBuffers;
StorageBufferLengthTracker storageBufferLengths = {};
+ VertexBufferTracker vertexBuffers(&storageBufferLengths);
BindGroupTracker bindGroups(&storageBufferLengths);
id<MTLRenderCommandEncoder> encoder = commandContext->BeginRender(mtlRenderPass);
@@ -1089,9 +990,9 @@ namespace dawn_native { namespace metal {
case Command::Draw: {
DrawCmd* draw = iter->NextCommand<DrawCmd>();
- vertexBuffers.Apply(encoder, lastPipeline);
+ vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline);
+ storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
// The instance count must be non-zero, otherwise no-op
if (draw->instanceCount != 0) {
@@ -1117,9 +1018,9 @@ namespace dawn_native { namespace metal {
size_t formatSize =
IndexFormatSize(lastPipeline->GetVertexStateDescriptor()->indexFormat);
- vertexBuffers.Apply(encoder, lastPipeline);
+ vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline);
+ storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
// The index and instance count must be non-zero, otherwise no-op
if (draw->indexCount != 0 && draw->instanceCount != 0) {
@@ -1151,9 +1052,9 @@ namespace dawn_native { namespace metal {
case Command::DrawIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- vertexBuffers.Apply(encoder, lastPipeline);
+ vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline);
+ storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
@@ -1166,9 +1067,9 @@ namespace dawn_native { namespace metal {
case Command::DrawIndexedIndirect: {
DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
- vertexBuffers.Apply(encoder, lastPipeline);
+ vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
bindGroups.Apply(encoder);
- storageBufferLengths.Apply(encoder, lastPipeline);
+ storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
@@ -1264,7 +1165,7 @@ namespace dawn_native { namespace metal {
case Command::EndRenderPass: {
mCommands.NextCommand<EndRenderPassCmd>();
commandContext->EndRender();
- return;
+ return {};
}
case Command::SetStencilReference: {
@@ -1331,6 +1232,10 @@ namespace dawn_native { namespace metal {
break;
}
+ case Command::WriteTimestamp: {
+ return DAWN_UNIMPLEMENTED_ERROR("Waiting for implementation.");
+ }
+
default: {
EncodeRenderBundleCommand(&mCommands, type);
break;
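
The vertex-pulling plumbing in this file records a binding size for every vertex buffer because, with Toggle::MetalEnableVertexPulling, vertex fetch is emulated through storage buffers and the shader must bounds-check its own reads. A sketch of the size computation mirrored by mVertexBufferBindingSizes; the types are stand-ins, not Dawn's:

    #include <cstdint>

    struct VertexBufferBinding {
        uint64_t bufferSize;  // total MTLBuffer length
        uint64_t offset;      // bind offset; assumed <= bufferSize
    };

    // The shader-visible length is whatever remains past the bind offset.
    uint32_t BindingSizeForShader(const VertexBufferBinding& binding) {
        return static_cast<uint32_t>(binding.bufferSize - binding.offset);
    }
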
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
index 76e41518918..8c4bc493278 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.h
@@ -18,6 +18,7 @@
#include "dawn_native/dawn_platform.h"
#include "common/Serial.h"
+#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
#include "dawn_native/metal/CommandRecordingContext.h"
#include "dawn_native/metal/Forward.h"
@@ -63,6 +64,10 @@ namespace dawn_native { namespace metal {
BufferBase* destination,
uint64_t destinationOffset,
uint64_t size) override;
+ MaybeError CopyFromStagingToTexture(StagingBufferBase* source,
+ const TextureDataLayout& dataLayout,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels);
private:
Device(AdapterBase* adapter, id<MTLDevice> mtlDevice, const DeviceDescriptor* descriptor);
@@ -71,7 +76,8 @@ namespace dawn_native { namespace metal {
const BindGroupDescriptor* descriptor) override;
ResultOrError<BindGroupLayoutBase*> CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) override;
- ResultOrError<BufferBase*> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+ ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+ const BufferDescriptor* descriptor) override;
ResultOrError<ComputePipelineBase*> CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) override;
ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
index d8d0feb6a16..4004d788b3e 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/DeviceMTL.mm
@@ -14,8 +14,10 @@
#include "dawn_native/metal/DeviceMTL.h"
+#include "common/Platform.h"
#include "dawn_native/BackendConnection.h"
#include "dawn_native/BindGroupLayout.h"
+#include "dawn_native/Commands.h"
#include "dawn_native/ErrorData.h"
#include "dawn_native/metal/BindGroupLayoutMTL.h"
#include "dawn_native/metal/BindGroupMTL.h"
@@ -30,6 +32,7 @@
#include "dawn_native/metal/StagingBufferMTL.h"
#include "dawn_native/metal/SwapChainMTL.h"
#include "dawn_native/metal/TextureMTL.h"
+#include "dawn_native/metal/UtilsMetal.h"
#include "dawn_platform/DawnPlatform.h"
#include "dawn_platform/tracing/TraceEvent.h"
@@ -49,9 +52,7 @@ namespace dawn_native { namespace metal {
Device::Device(AdapterBase* adapter,
id<MTLDevice> mtlDevice,
const DeviceDescriptor* descriptor)
- : DeviceBase(adapter, descriptor),
- mMtlDevice([mtlDevice retain]),
- mCompletedSerial(0) {
+ : DeviceBase(adapter, descriptor), mMtlDevice([mtlDevice retain]), mCompletedSerial(0) {
[mMtlDevice retain];
}
@@ -61,6 +62,11 @@ namespace dawn_native { namespace metal {
MaybeError Device::Initialize() {
InitTogglesFromDriver();
+
+ if (!IsRobustnessEnabled() || !IsToggleEnabled(Toggle::UseSpvc)) {
+ ForceSetToggle(Toggle::MetalEnableVertexPulling, false);
+ }
+
mCommandQueue = [mMtlDevice newCommandQueue];
return DeviceBase::Initialize(new Queue(this));
@@ -70,8 +76,10 @@ namespace dawn_native { namespace metal {
{
bool haveStoreAndMSAAResolve = false;
#if defined(DAWN_PLATFORM_MACOS)
- haveStoreAndMSAAResolve =
- [mMtlDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v2];
+ if (@available(macOS 10.12, *)) {
+ haveStoreAndMSAAResolve =
+ [mMtlDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v2];
+ }
#elif defined(DAWN_PLATFORM_IOS)
haveStoreAndMSAAResolve =
[mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v2];
@@ -108,7 +116,7 @@ namespace dawn_native { namespace metal {
const BindGroupLayoutDescriptor* descriptor) {
return new BindGroupLayout(this, descriptor);
}
- ResultOrError<BufferBase*> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+ ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
return Buffer::Create(this, descriptor);
}
CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
@@ -254,6 +262,10 @@ namespace dawn_native { namespace metal {
// this function.
ASSERT(size != 0);
+ ToBackend(destination)
+ ->EnsureDataInitializedAsDestination(GetPendingCommandContext(), destinationOffset,
+ size);
+
id<MTLBuffer> uploadBuffer = ToBackend(source)->GetBufferHandle();
id<MTLBuffer> buffer = ToBackend(destination)->GetMTLBuffer();
[GetPendingCommandContext()->EnsureBlit() copyFromBuffer:uploadBuffer
@@ -264,20 +276,63 @@ namespace dawn_native { namespace metal {
return {};
}
+ MaybeError Device::CopyFromStagingToTexture(StagingBufferBase* source,
+ const TextureDataLayout& dataLayout,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) {
+ Texture* texture = ToBackend(dst->texture.Get());
+
+        // This function assumes the data is perfectly aligned. Otherwise, the copy might need
+        // to be split into several stages: see ComputeTextureBufferCopySplit.
+ const TexelBlockInfo& blockInfo = texture->GetFormat().GetTexelBlockInfo(dst->aspect);
+ uint32_t blockSize = blockInfo.blockByteSize;
+ uint32_t blockWidth = blockInfo.blockWidth;
+ uint32_t blockHeight = blockInfo.blockHeight;
+ ASSERT(dataLayout.rowsPerImage == (copySizePixels.height));
+ ASSERT(dataLayout.bytesPerRow == (copySizePixels.width) / blockWidth * blockSize);
+
+ EnsureDestinationTextureInitialized(texture, *dst, copySizePixels);
+
+        // The Metal validation layer requires that, if the texture's pixel format is a
+        // compressed format, the sourceSize must be a multiple of the pixel format's block
+        // size, or be clamped to the edge of the texture if the block extends outside the
+        // bounds of the texture.
+ const Extent3D clampedSize =
+ texture->ClampToMipLevelVirtualSize(dst->mipLevel, dst->origin, copySizePixels);
+ const uint32_t copyBaseLayer = dst->origin.z;
+ const uint32_t copyLayerCount = copySizePixels.depth;
+ const uint64_t bytesPerImage =
+ dataLayout.rowsPerImage * dataLayout.bytesPerRow / blockHeight;
+
+ MTLBlitOption blitOption = ComputeMTLBlitOption(texture->GetFormat(), dst->aspect);
+
+ uint64_t bufferOffset = dataLayout.offset;
+ for (uint32_t copyLayer = copyBaseLayer; copyLayer < copyBaseLayer + copyLayerCount;
+ ++copyLayer) {
+ [GetPendingCommandContext()->EnsureBlit()
+ copyFromBuffer:ToBackend(source)->GetBufferHandle()
+ sourceOffset:bufferOffset
+ sourceBytesPerRow:dataLayout.bytesPerRow
+ sourceBytesPerImage:bytesPerImage
+ sourceSize:MTLSizeMake(clampedSize.width, clampedSize.height, 1)
+ toTexture:texture->GetMTLTexture()
+ destinationSlice:copyLayer
+ destinationLevel:dst->mipLevel
+ destinationOrigin:MTLOriginMake(dst->origin.x, dst->origin.y, 0)
+ options:blitOption];
+
+ bufferOffset += bytesPerImage;
+ }
+
+ return {};
+ }
+
TextureBase* Device::CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
IOSurfaceRef ioSurface,
uint32_t plane) {
const TextureDescriptor* textureDescriptor =
reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
- // TODO(dawn:22): Remove once migration from GPUTextureDescriptor.arrayLayerCount to
- // GPUTextureDescriptor.size.depth is done.
- TextureDescriptor fixedDescriptor;
- if (ConsumedError(FixTextureDescriptor(this, textureDescriptor), &fixedDescriptor)) {
- return nullptr;
- }
- textureDescriptor = &fixedDescriptor;
-
if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
return nullptr;
}
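A minimal sketch of the per-layer offset arithmetic the new CopyFromStagingToTexture relies on, assuming tightly packed staging data; the helper name is illustrative, not Dawn API:

    #include <cstdint>

    // With tightly packed data, each array layer occupies
    // bytesPerImage = rowsPerImage * bytesPerRow / blockHeight bytes,
    // so layer N of the copy starts at baseOffset + N * bytesPerImage.
    uint64_t LayerOffset(uint64_t baseOffset,
                         uint32_t bytesPerRow,
                         uint32_t rowsPerImage,  // in pixels
                         uint32_t blockHeight,   // 1 for uncompressed formats
                         uint32_t layer) {
        const uint64_t bytesPerImage =
            uint64_t(rowsPerImage) * bytesPerRow / blockHeight;
        return baseOffset + uint64_t(layer) * bytesPerImage;
    }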
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h
index 7a3ad8084b3..b492e3b5827 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.h
@@ -15,7 +15,7 @@
#ifndef DAWNNATIVE_METAL_PIPELINELAYOUTMTL_H_
#define DAWNNATIVE_METAL_PIPELINELAYOUTMTL_H_
-#include "common/ityp_array.h"
+#include "common/ityp_stack_vec.h"
#include "dawn_native/BindingInfo.h"
#include "dawn_native/PipelineLayout.h"
@@ -44,7 +44,7 @@ namespace dawn_native { namespace metal {
using BindingIndexInfo =
ityp::array<BindGroupIndex,
- ityp::array<BindingIndex, uint32_t, kMaxBindingsPerGroup>,
+ ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup>,
kMaxBindGroups>;
const BindingIndexInfo& GetBindingIndexInfo(SingleShaderStage stage) const;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
index 3ee7d92bb1b..fa5d9262edf 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/PipelineLayoutMTL.mm
@@ -29,6 +29,8 @@ namespace dawn_native { namespace metal {
uint32_t textureIndex = 0;
for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+ mIndexInfo[stage][group].resize(GetBindGroupLayout(group)->GetBindingCount());
+
for (BindingIndex bindingIndex{0};
bindingIndex < GetBindGroupLayout(group)->GetBindingCount(); ++bindingIndex) {
const BindingInfo& bindingInfo =
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.h
index 2dd718e5c06..55915c25b52 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.h
@@ -28,6 +28,10 @@ namespace dawn_native { namespace metal {
private:
MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError WriteTextureImpl(const TextureCopyView& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& writeSizePixel) override;
};
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
index 7c5967ad81d..5202ccfef10 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/QueueMTL.mm
@@ -14,12 +14,58 @@
#include "dawn_native/metal/QueueMTL.h"
+#include "common/Math.h"
+#include "dawn_native/Buffer.h"
+#include "dawn_native/CommandValidation.h"
+#include "dawn_native/Commands.h"
+#include "dawn_native/DynamicUploader.h"
#include "dawn_native/metal/CommandBufferMTL.h"
#include "dawn_native/metal/DeviceMTL.h"
#include "dawn_platform/DawnPlatform.h"
#include "dawn_platform/tracing/TraceEvent.h"
namespace dawn_native { namespace metal {
+ namespace {
+ ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRow(
+ DeviceBase* device,
+ const void* data,
+ uint32_t alignedBytesPerRow,
+ uint32_t alignedRowsPerImage,
+ const TextureDataLayout& dataLayout,
+ const TexelBlockInfo& blockInfo,
+ const Extent3D& writeSizePixel) {
+ uint64_t newDataSizeBytes;
+ DAWN_TRY_ASSIGN(newDataSizeBytes,
+ ComputeRequiredBytesInCopy(blockInfo, writeSizePixel,
+ alignedBytesPerRow, alignedRowsPerImage));
+
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+ newDataSizeBytes, device->GetPendingCommandSerial(),
+ blockInfo.blockByteSize));
+ ASSERT(uploadHandle.mappedBuffer != nullptr);
+
+ uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
+ const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
+ srcPointer += dataLayout.offset;
+
+ uint32_t alignedRowsPerImageInBlock = alignedRowsPerImage / blockInfo.blockHeight;
+ uint32_t dataRowsPerImageInBlock = dataLayout.rowsPerImage / blockInfo.blockHeight;
+ if (dataRowsPerImageInBlock == 0) {
+ dataRowsPerImageInBlock = writeSizePixel.height / blockInfo.blockHeight;
+ }
+
+ ASSERT(dataRowsPerImageInBlock >= alignedRowsPerImageInBlock);
+ uint64_t imageAdditionalStride =
+ dataLayout.bytesPerRow * (dataRowsPerImageInBlock - alignedRowsPerImageInBlock);
+
+ CopyTextureData(dstPointer, srcPointer, writeSizePixel.depth,
+ alignedRowsPerImageInBlock, imageAdditionalStride, alignedBytesPerRow,
+ alignedBytesPerRow, dataLayout.bytesPerRow);
+
+ return uploadHandle;
+ }
+ }
Queue::Queue(Device* device) : QueueBase(device) {
}
@@ -31,7 +77,7 @@ namespace dawn_native { namespace metal {
TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
for (uint32_t i = 0; i < commandCount; ++i) {
- ToBackend(commands[i])->FillCommands(commandContext);
+ DAWN_TRY(ToBackend(commands[i])->FillCommands(commandContext));
}
TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
@@ -39,4 +85,43 @@ namespace dawn_native { namespace metal {
return {};
}
+    // We don't write from the CPU directly to the texture, which could be done in Metal using
+    // the replaceRegion function, because that function requires a non-private storage mode and
+    // Dawn sets the private storage mode by default for all textures except IOSurfaces on macOS.
+ MaybeError Queue::WriteTextureImpl(const TextureCopyView& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& writeSizePixel) {
+ const TexelBlockInfo& blockInfo =
+ destination.texture->GetFormat().GetTexelBlockInfo(destination.aspect);
+
+        // We are only copying the part of the data that will appear in the texture.
+        // Note that validating the texture copy range ensures that writeSizePixel.width and
+        // writeSizePixel.height are multiples of blockWidth and blockHeight respectively.
+ uint32_t alignedBytesPerRow =
+ (writeSizePixel.width) / blockInfo.blockWidth * blockInfo.blockByteSize;
+ uint32_t alignedRowsPerImage = writeSizePixel.height;
+
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle,
+ UploadTextureDataAligningBytesPerRow(GetDevice(), data, alignedBytesPerRow,
+ alignedRowsPerImage, dataLayout,
+ blockInfo, writeSizePixel));
+
+ TextureDataLayout passDataLayout = dataLayout;
+ passDataLayout.offset = uploadHandle.startOffset;
+ passDataLayout.bytesPerRow = alignedBytesPerRow;
+ passDataLayout.rowsPerImage = alignedRowsPerImage;
+
+ TextureCopy textureCopy;
+ textureCopy.texture = destination.texture;
+ textureCopy.mipLevel = destination.mipLevel;
+ textureCopy.origin = destination.origin;
+ textureCopy.aspect = ConvertAspect(destination.texture->GetFormat(), destination.aspect);
+
+ return ToBackend(GetDevice())
+ ->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout, &textureCopy,
+ writeSizePixel);
+ }
+
}} // namespace dawn_native::metal
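The repacking that UploadTextureDataAligningBytesPerRow delegates to CopyTextureData can be pictured with a sketch like the following; it is an illustration of row-by-row restriding, not Dawn's actual helper:

    #include <cstdint>
    #include <cstring>

    // Copies each block row from the user's layout (srcBytesPerRow stride,
    // possibly padded) into a tightly aligned staging layout (dstBytesPerRow
    // stride), image by image.
    void RepackRows(uint8_t* dst, const uint8_t* src,
                    uint32_t blockRowsPerImage, uint32_t imageCount,
                    uint32_t rowBytes,  // bytes of actual data per block row
                    uint32_t srcBytesPerRow, uint32_t dstBytesPerRow,
                    uint64_t srcImageStride, uint64_t dstImageStride) {
        for (uint32_t image = 0; image < imageCount; ++image) {
            const uint8_t* srcRow = src + image * srcImageStride;
            uint8_t* dstRow = dst + image * dstImageStride;
            for (uint32_t row = 0; row < blockRowsPerImage; ++row) {
                std::memcpy(dstRow, srcRow, rowBytes);
                srcRow += srcBytesPerRow;
                dstRow += dstBytesPerRow;
            }
        }
    }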
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
index 9deba399bda..1e9efe1b30c 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/RenderPipelineMTL.mm
@@ -329,11 +329,24 @@ namespace dawn_native { namespace metal {
MTLRenderPipelineDescriptor* descriptorMTL = [MTLRenderPipelineDescriptor new];
+        // TODO: MakeVertexDesc should be const in the future, so we don't need to call it here
+        // when vertex pulling is enabled.
+ MTLVertexDescriptor* vertexDesc = MakeVertexDesc();
+ descriptorMTL.vertexDescriptor = vertexDesc;
+ [vertexDesc release];
+
+ if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
+ // Calling MakeVertexDesc first is important since it sets indices for packed bindings
+ MTLVertexDescriptor* emptyVertexDesc = [MTLVertexDescriptor new];
+ descriptorMTL.vertexDescriptor = emptyVertexDesc;
+ [emptyVertexDesc release];
+ }
+
ShaderModule* vertexModule = ToBackend(descriptor->vertexStage.module);
const char* vertexEntryPoint = descriptor->vertexStage.entryPoint;
ShaderModule::MetalFunctionData vertexData;
DAWN_TRY(vertexModule->GetFunction(vertexEntryPoint, SingleShaderStage::Vertex,
- ToBackend(GetLayout()), &vertexData));
+ ToBackend(GetLayout()), &vertexData, 0xFFFFFFFF, this));
descriptorMTL.vertexFunction = vertexData.function;
if (vertexData.needsStorageBufferLength) {
@@ -344,7 +357,8 @@ namespace dawn_native { namespace metal {
const char* fragmentEntryPoint = descriptor->fragmentStage->entryPoint;
ShaderModule::MetalFunctionData fragmentData;
DAWN_TRY(fragmentModule->GetFunction(fragmentEntryPoint, SingleShaderStage::Fragment,
- ToBackend(GetLayout()), &fragmentData));
+ ToBackend(GetLayout()), &fragmentData,
+ descriptor->sampleMask));
descriptorMTL.fragmentFunction = fragmentData.function;
if (fragmentData.needsStorageBufferLength) {
@@ -370,18 +384,14 @@ namespace dawn_native { namespace metal {
descriptorMTL.colorAttachments[i].pixelFormat =
MetalPixelFormat(GetColorAttachmentFormat(i));
const ColorStateDescriptor* descriptor = GetColorStateDescriptor(i);
- bool isDeclaredInFragmentShader = fragmentOutputBaseTypes[i] != Format::Other;
+ bool isDeclaredInFragmentShader = fragmentOutputBaseTypes[i] != Format::Type::Other;
ComputeBlendDesc(descriptorMTL.colorAttachments[i], descriptor,
isDeclaredInFragmentShader);
}
descriptorMTL.inputPrimitiveTopology = MTLInputPrimitiveTopology(GetPrimitiveTopology());
-
- MTLVertexDescriptor* vertexDesc = MakeVertexDesc();
- descriptorMTL.vertexDescriptor = vertexDesc;
- [vertexDesc release];
-
descriptorMTL.sampleCount = GetSampleCount();
+ descriptorMTL.alphaToCoverageEnabled = descriptor->alphaToCoverageEnabled;
{
NSError* error = nil;
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
index d4d41abc687..9193b25029a 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.h
@@ -29,6 +29,7 @@ namespace dawn_native { namespace metal {
class Device;
class PipelineLayout;
+ class RenderPipeline;
class ShaderModule final : public ShaderModuleBase {
public:
@@ -46,14 +47,16 @@ namespace dawn_native { namespace metal {
MaybeError GetFunction(const char* functionName,
SingleShaderStage functionStage,
const PipelineLayout* layout,
- MetalFunctionData* out);
+ MetalFunctionData* out,
+ uint32_t sampleMask = 0xFFFFFFFF,
+ const RenderPipeline* renderPipeline = nullptr);
private:
ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
~ShaderModule() override = default;
MaybeError Initialize();
- shaderc_spvc::CompileOptions GetMSLCompileOptions();
+ shaderc_spvc::CompileOptions GetMSLCompileOptions(uint32_t sampleMask = 0xFFFFFFFF);
};
}} // namespace dawn_native::metal
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
index 5983e9ea82d..208612e6318 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/ShaderModuleMTL.mm
@@ -17,6 +17,7 @@
#include "dawn_native/BindGroupLayout.h"
#include "dawn_native/metal/DeviceMTL.h"
#include "dawn_native/metal/PipelineLayoutMTL.h"
+#include "dawn_native/metal/RenderPipelineMTL.h"
#include <spirv_msl.hpp>
@@ -91,19 +92,35 @@ namespace dawn_native { namespace metal {
MaybeError ShaderModule::GetFunction(const char* functionName,
SingleShaderStage functionStage,
const PipelineLayout* layout,
- ShaderModule::MetalFunctionData* out) {
+ ShaderModule::MetalFunctionData* out,
+ uint32_t sampleMask,
+ const RenderPipeline* renderPipeline) {
ASSERT(!IsError());
ASSERT(out);
- const std::vector<uint32_t>& spirv = GetSpirv();
+ const std::vector<uint32_t>* spirv = &GetSpirv();
+
+#ifdef DAWN_ENABLE_WGSL
+ // Use set 4 since it is bigger than what users can access currently
+        // Use set 4 since it is beyond what users can currently access.
+ std::vector<uint32_t> pullingSpirv;
+ if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling) &&
+ functionStage == SingleShaderStage::Vertex) {
+ DAWN_TRY_ASSIGN(pullingSpirv,
+ GeneratePullingSpirv(*renderPipeline->GetVertexStateDescriptor(),
+ functionName, kPullingBufferBindingSet));
+ spirv = &pullingSpirv;
+ }
+#endif
std::unique_ptr<spirv_cross::CompilerMSL> compilerImpl;
spirv_cross::CompilerMSL* compiler;
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
// Initializing the compiler is needed every call, because this method uses reflection
// to mutate the compiler's IR.
- DAWN_TRY(CheckSpvcSuccess(
- mSpvcContext.InitializeForMsl(spirv.data(), spirv.size(), GetMSLCompileOptions()),
- "Unable to initialize instance of spvc"));
+ DAWN_TRY(
+ CheckSpvcSuccess(mSpvcContext.InitializeForMsl(spirv->data(), spirv->size(),
+ GetMSLCompileOptions(sampleMask)),
+ "Unable to initialize instance of spvc"));
DAWN_TRY(CheckSpvcSuccess(mSpvcContext.GetCompiler(reinterpret_cast<void**>(&compiler)),
"Unable to get cross compiler"));
} else {
@@ -122,7 +139,9 @@ namespace dawn_native { namespace metal {
// the shader storage buffer lengths.
options_msl.buffer_size_buffer_index = kBufferLengthBufferSlot;
- compilerImpl = std::make_unique<spirv_cross::CompilerMSL>(spirv);
+ options_msl.additional_fixed_sample_mask = sampleMask;
+
+ compilerImpl = std::make_unique<spirv_cross::CompilerMSL>(*spirv);
compiler = compilerImpl.get();
compiler->set_msl_options(options_msl);
}
@@ -168,6 +187,22 @@ namespace dawn_native { namespace metal {
}
}
+ // Add vertex buffers bound as storage buffers
+ if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling) &&
+ functionStage == SingleShaderStage::Vertex) {
+ for (uint32_t dawnIndex : IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
+ uint32_t metalIndex = renderPipeline->GetMtlVertexBufferIndex(dawnIndex);
+
+ shaderc_spvc_msl_resource_binding mslBinding;
+ mslBinding.stage = ToSpvcExecutionModel(SingleShaderStage::Vertex);
+ mslBinding.desc_set = kPullingBufferBindingSet;
+ mslBinding.binding = dawnIndex;
+ mslBinding.msl_buffer = metalIndex;
+ DAWN_TRY(CheckSpvcSuccess(mSpvcContext.AddMSLResourceBinding(mslBinding),
+ "Unable to add MSL Resource Binding"));
+ }
+ }
+
{
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
shaderc_spvc_execution_model executionModel = ToSpvcExecutionModel(functionStage);
@@ -187,29 +222,37 @@ namespace dawn_native { namespace metal {
// SPIRV-Cross also supports re-ordering attributes but it seems to do the correct thing
// by default.
NSString* mslSource;
+ std::string msl;
if (GetDevice()->IsToggleEnabled(Toggle::UseSpvc)) {
shaderc_spvc::CompilationResult result;
DAWN_TRY(CheckSpvcSuccess(mSpvcContext.CompileShader(&result),
"Unable to compile MSL shader"));
- std::string result_str;
- DAWN_TRY(CheckSpvcSuccess(result.GetStringOutput(&result_str),
+ DAWN_TRY(CheckSpvcSuccess(result.GetStringOutput(&msl),
"Unable to get MSL shader text"));
- mslSource = [[NSString alloc] initWithUTF8String:result_str.c_str()];
} else {
- std::string msl = compiler->compile();
- mslSource = [[NSString alloc] initWithUTF8String:msl.c_str()];
+ msl = compiler->compile();
}
+ // Metal uses Clang to compile the shader as C++14. Disable everything in the -Wall
+ // category. -Wunused-variable in particular comes up a lot in generated code, and some
+ // (old?) Metal drivers accidentally treat it as a MTLLibraryErrorCompileError instead
+ // of a warning.
+ msl = R"(\
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wall"
+#endif
+)" + msl;
+ mslSource = [[NSString alloc] initWithUTF8String:msl.c_str()];
+
auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
NSError* error = nil;
id<MTLLibrary> library = [mtlDevice newLibraryWithSource:mslSource
options:nil
error:&error];
if (error != nil) {
- // TODO(cwallez@chromium.org): Switch that NSLog to use dawn::InfoLog or even be
- // folded in the DAWN_VALIDATION_ERROR
- NSLog(@"MTLDevice newLibraryWithSource => %@", error);
if (error.code != MTLLibraryErrorCompileWarning) {
- return DAWN_VALIDATION_ERROR("Unable to create library object");
+ const char* errorString = [error.localizedDescription UTF8String];
+ return DAWN_VALIDATION_ERROR(std::string("Unable to create library object: ") +
+ errorString);
}
}
@@ -233,10 +276,15 @@ namespace dawn_native { namespace metal {
out->needsStorageBufferLength = compiler->needs_buffer_size_buffer();
}
+ if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling) &&
+ functionStage == SingleShaderStage::Vertex && GetUsedVertexAttributes().any()) {
+ out->needsStorageBufferLength = true;
+ }
+
return {};
}
- shaderc_spvc::CompileOptions ShaderModule::GetMSLCompileOptions() {
+ shaderc_spvc::CompileOptions ShaderModule::GetMSLCompileOptions(uint32_t sampleMask) {
// If these options are changed, the values in DawnSPIRVCrossGLSLFastFuzzer.cpp need to
// be updated.
shaderc_spvc::CompileOptions options = GetCompileOptions();
@@ -252,6 +300,8 @@ namespace dawn_native { namespace metal {
// the shader storage buffer lengths.
options.SetMSLBufferSizeBufferIndex(kBufferLengthBufferSlot);
+ options.SetMSLAdditionalFixedSampleMask(sampleMask);
+
return options;
}
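The warning-suppression change above amounts to a plain string prepend before the generated MSL reaches newLibraryWithSource:; a self-contained sketch of that step (the pragma only takes effect under Clang):

    #include <string>

    // Prefixes the generated MSL so Metal's Clang-based compiler ignores the
    // -Wall category; some drivers otherwise promote warnings such as
    // -Wunused-variable to compile errors.
    std::string PrefixMslSource(std::string msl) {
        static const char kPrefix[] =
            "#ifdef __clang__\n"
            "#pragma clang diagnostic ignored \"-Wall\"\n"
            "#endif\n";
        return kPrefix + std::move(msl);
    }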
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
index 454a14b2739..449f3389516 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/TextureMTL.mm
@@ -18,8 +18,10 @@
#include "common/Math.h"
#include "common/Platform.h"
#include "dawn_native/DynamicUploader.h"
+#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/metal/DeviceMTL.h"
#include "dawn_native/metal/StagingBufferMTL.h"
+#include "dawn_native/metal/UtilsMetal.h"
#include <CoreVideo/CVPixelBuffer.h>
@@ -47,10 +49,6 @@ namespace dawn_native { namespace metal {
result |= MTLTextureUsageRenderTarget;
}
- if (UsageNeedsTextureView(usage)) {
- result |= MTLTextureUsagePixelFormatView;
- }
-
return result;
}
@@ -174,8 +172,10 @@ namespace dawn_native { namespace metal {
return MTLPixelFormatBGRA8Unorm_sRGB;
case wgpu::TextureFormat::RGB10A2Unorm:
return MTLPixelFormatRGB10A2Unorm;
- case wgpu::TextureFormat::RG11B10Float:
+ case wgpu::TextureFormat::RG11B10Ufloat:
return MTLPixelFormatRG11B10Float;
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ return MTLPixelFormatRGB9E5Float;
case wgpu::TextureFormat::RG32Uint:
return MTLPixelFormatRG32Uint;
@@ -289,6 +289,8 @@ namespace dawn_native { namespace metal {
mtlDesc.width = descriptor->size.width;
mtlDesc.height = descriptor->size.height;
mtlDesc.sampleCount = descriptor->sampleCount;
+        // TODO: add MTLTextureUsagePixelFormatView when needed, once we support format
+        // reinterpretation.
mtlDesc.usage = MetalTextureUsage(descriptor->usage);
mtlDesc.pixelFormat = MetalPixelFormat(descriptor->format);
mtlDesc.mipmapLevelCount = descriptor->mipLevelCount;
@@ -353,7 +355,7 @@ namespace dawn_native { namespace metal {
plane:plane];
[mtlDesc release];
- SetIsSubresourceContentInitialized(descriptor->isCleared, {0, 1, 0, 1});
+ SetIsSubresourceContentInitialized(descriptor->isCleared, GetAllSubresources());
}
Texture::~Texture() {
@@ -393,8 +395,8 @@ namespace dawn_native { namespace metal {
for (uint32_t arrayLayer = range.baseArrayLayer;
arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleSubresource(level, arrayLayer))) {
+ IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
+ level, arrayLayer, range.aspects))) {
// Skip lazy clears if already initialized.
continue;
}
@@ -402,18 +404,34 @@ namespace dawn_native { namespace metal {
MTLRenderPassDescriptor* descriptor =
[MTLRenderPassDescriptor renderPassDescriptor];
- if (GetFormat().HasDepth()) {
- descriptor.depthAttachment.texture = GetMTLTexture();
- descriptor.depthAttachment.loadAction = MTLLoadActionClear;
- descriptor.depthAttachment.storeAction = MTLStoreActionStore;
- descriptor.depthAttachment.clearDepth = dClearColor;
- }
- if (GetFormat().HasStencil()) {
- descriptor.stencilAttachment.texture = GetMTLTexture();
- descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
- descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
- descriptor.stencilAttachment.clearStencil =
- static_cast<uint32_t>(clearColor);
+ // At least one aspect needs clearing. Iterate the aspects individually to
+ // determine which to clear.
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
+ level, arrayLayer, aspect))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+
+ switch (aspect) {
+ case Aspect::Depth:
+ descriptor.depthAttachment.texture = GetMTLTexture();
+ descriptor.depthAttachment.loadAction = MTLLoadActionClear;
+ descriptor.depthAttachment.storeAction = MTLStoreActionStore;
+ descriptor.depthAttachment.clearDepth = dClearColor;
+ break;
+ case Aspect::Stencil:
+ descriptor.stencilAttachment.texture = GetMTLTexture();
+ descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
+ descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
+ descriptor.stencilAttachment.clearStencil =
+ static_cast<uint32_t>(clearColor);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
}
commandContext->BeginRender(descriptor);
@@ -433,8 +451,8 @@ namespace dawn_native { namespace metal {
for (uint32_t arrayLayer = range.baseArrayLayer;
arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleSubresource(level, arrayLayer))) {
+ IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
+ level, arrayLayer, Aspect::Color))) {
// Skip lazy clears if already initialized.
continue;
}
@@ -492,41 +510,29 @@ namespace dawn_native { namespace metal {
DynamicUploader* uploader = device->GetDynamicUploader();
UploadHandle uploadHandle;
DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(bufferSize, device->GetPendingCommandSerial()));
+ uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
+ GetFormat().blockByteSize));
memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
id<MTLBlitCommandEncoder> encoder = commandContext->EnsureBlit();
id<MTLBuffer> uploadBuffer = ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle();
// Encode a buffer to texture copy to clear each subresource.
- for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
- ++level) {
- Extent3D virtualSize = GetMipLevelVirtualSize(level);
-
- for (uint32_t arrayLayer = range.baseArrayLayer;
- arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleSubresource(level, arrayLayer))) {
- // Skip lazy clears if already initialized.
- continue;
- }
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ for (uint32_t level = range.baseMipLevel;
+ level < range.baseMipLevel + range.levelCount; ++level) {
+ Extent3D virtualSize = GetMipLevelVirtualSize(level);
- // If the texture’s pixel format is a combined depth/stencil format, then
- // options must be set to either blit the depth attachment portion or blit the
- // stencil attachment portion.
- std::array<MTLBlitOption, 3> blitOptions = {
- MTLBlitOptionNone, MTLBlitOptionDepthFromDepthStencil,
- MTLBlitOptionStencilFromDepthStencil};
-
- auto blitOptionStart = blitOptions.begin();
- auto blitOptionEnd = blitOptionStart + 1;
- if (GetFormat().format == wgpu::TextureFormat::Depth24PlusStencil8) {
- blitOptionStart = blitOptions.begin() + 1;
- blitOptionEnd = blitOptionStart + 2;
- }
+ for (uint32_t arrayLayer = range.baseArrayLayer;
+ arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, arrayLayer, aspect))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
- for (auto it = blitOptionStart; it != blitOptionEnd; ++it) {
+ MTLBlitOption blitOption = ComputeMTLBlitOption(GetFormat(), aspect);
[encoder copyFromBuffer:uploadBuffer
sourceOffset:uploadHandle.startOffset
sourceBytesPerRow:largestMipBytesPerRow
@@ -537,7 +543,7 @@ namespace dawn_native { namespace metal {
destinationSlice:arrayLayer
destinationLevel:level
destinationOrigin:MTLOriginMake(0, 0, 0)
- options:(*it)];
+ options:blitOption];
}
}
}
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h
index 091d8284f0d..d7c0a70e52c 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h
+++ b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.h
@@ -16,6 +16,8 @@
#define DAWNNATIVE_METAL_UTILSMETAL_H_
#include "dawn_native/dawn_platform.h"
+#include "dawn_native/metal/DeviceMTL.h"
+#include "dawn_native/metal/TextureMTL.h"
#import <Metal/Metal.h>
@@ -23,6 +25,36 @@ namespace dawn_native { namespace metal {
MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction);
+ struct TextureBufferCopySplit {
+ static constexpr uint32_t kMaxTextureBufferCopyRegions = 3;
+
+ struct CopyInfo {
+ NSUInteger bufferOffset;
+ NSUInteger bytesPerRow;
+ NSUInteger bytesPerImage;
+ Origin3D textureOrigin;
+ Extent3D copyExtent;
+ };
+
+ uint32_t count = 0;
+ std::array<CopyInfo, kMaxTextureBufferCopyRegions> copies;
+ };
+
+ TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
+ uint32_t mipLevel,
+ Origin3D origin,
+ Extent3D copyExtent,
+ uint64_t bufferSize,
+ uint64_t bufferOffset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage);
+
+ void EnsureDestinationTextureInitialized(Texture* texture,
+ const TextureCopy& dst,
+ const Extent3D& size);
+
+ MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect);
+
}} // namespace dawn_native::metal
#endif // DAWNNATIVE_METAL_UTILSMETAL_H_
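A hedged sketch of how a caller would consume TextureBufferCopySplit, assuming the declaration above; EncodeOneBlit stands in for the Metal blit-encoder invocation and is not a real Dawn function:

    #include <cstdint>

    // Stand-in for [blit copyFromBuffer:... sourceOffset:info.bufferOffset
    //               sourceBytesPerRow:info.bytesPerRow ...]; not Dawn API.
    void EncodeOneBlit(const TextureBufferCopySplit::CopyInfo& info);

    void EncodeSplitCopies(const TextureBufferCopySplit& split) {
        for (uint32_t i = 0; i < split.count; ++i) {
            // Each region carries its own bufferOffset/bytesPerRow/bytesPerImage,
            // so the last row and last image can use tight strides.
            EncodeOneBlit(split.copies[i]);
        }
    }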
diff --git a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm
index aeb4f7aae8c..3b8f64c0d8c 100644
--- a/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm
+++ b/chromium/third_party/dawn/src/dawn_native/metal/UtilsMetal.mm
@@ -13,6 +13,7 @@
// limitations under the License.
#include "dawn_native/metal/UtilsMetal.h"
+#include "dawn_native/CommandBuffer.h"
#include "common/Assert.h"
@@ -41,4 +42,146 @@ namespace dawn_native { namespace metal {
}
}
+ TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
+ uint32_t mipLevel,
+ Origin3D origin,
+ Extent3D copyExtent,
+ uint64_t bufferSize,
+ uint64_t bufferOffset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage) {
+ TextureBufferCopySplit copy;
+ const Format textureFormat = texture->GetFormat();
+
+ // When copying textures from/to an unpacked buffer, the Metal validation layer doesn't
+ // compute the correct range when checking if the buffer is big enough to contain the
+ // data for the whole copy. Instead of looking at the position of the last texel in the
+ // buffer, it computes the volume of the 3D box with bytesPerRow * (rowsPerImage /
+        // buffer, it computes the volume of the 3D box with bytesPerRow * (rowsPerImage /
+        // format.blockHeight) * copySize.depth. For example, consider the pixel buffer below,
+        // where in memory each row of texture data (D) is followed by some padding data (P):
+ // |DDDDDDD|PP|
+ // |DDDDDDD|PP|
+ // |DDDDDDD|PP|
+ // |DDDDDDD|PP|
+ // |DDDDDDA|PP|
+        // The last pixel read will be A, but the driver will think the copy covers the whole
+        // last padding row as well, causing it to generate an error even when the pixel buffer
+        // is just big enough.
+
+        // We work around this limitation by detecting when Metal would complain and copying
+        // the last image and row separately using a tight sourceBytesPerRow or
+        // sourceBytesPerImage.
+ uint32_t dataRowsPerImage = rowsPerImage / textureFormat.blockHeight;
+ uint32_t bytesPerImage = bytesPerRow * dataRowsPerImage;
+
+        // The Metal validation layer requires that, if the texture's pixel format is a
+        // compressed format, the sourceSize must be a multiple of the pixel format's block
+        // size, or be clamped to the edge of the texture if the block extends outside the
+        // bounds of the texture.
+ const Extent3D clampedCopyExtent =
+ texture->ClampToMipLevelVirtualSize(mipLevel, origin, copyExtent);
+
+ ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+
+        // Check whether the buffer is big enough for the whole copy.
+ bool needWorkaround = bufferSize - bufferOffset < bytesPerImage * copyExtent.depth;
+ if (!needWorkaround) {
+ copy.count = 1;
+ copy.copies[0].bufferOffset = bufferOffset;
+ copy.copies[0].bytesPerRow = bytesPerRow;
+ copy.copies[0].bytesPerImage = bytesPerImage;
+ copy.copies[0].textureOrigin = origin;
+ copy.copies[0].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
+ copyExtent.depth};
+ return copy;
+ }
+
+ uint64_t currentOffset = bufferOffset;
+
+        // Copy everything except the last image.
+ if (copyExtent.depth > 1) {
+ copy.copies[copy.count].bufferOffset = currentOffset;
+ copy.copies[copy.count].bytesPerRow = bytesPerRow;
+ copy.copies[copy.count].bytesPerImage = bytesPerImage;
+ copy.copies[copy.count].textureOrigin = origin;
+ copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
+ copyExtent.depth - 1};
+
+ ++copy.count;
+
+ // Update offset to copy to the last image.
+ currentOffset += (copyExtent.depth - 1) * bytesPerImage;
+ }
+
+        // Copy everything in the last image except the last row.
+ uint32_t copyBlockRowCount = copyExtent.height / textureFormat.blockHeight;
+ if (copyBlockRowCount > 1) {
+ copy.copies[copy.count].bufferOffset = currentOffset;
+ copy.copies[copy.count].bytesPerRow = bytesPerRow;
+ copy.copies[copy.count].bytesPerImage = bytesPerRow * (copyBlockRowCount - 1);
+ copy.copies[copy.count].textureOrigin = {origin.x, origin.y,
+ origin.z + copyExtent.depth - 1};
+
+ ASSERT(copyExtent.height - textureFormat.blockHeight <
+ texture->GetMipLevelVirtualSize(mipLevel).height);
+ copy.copies[copy.count].copyExtent = {clampedCopyExtent.width,
+ copyExtent.height - textureFormat.blockHeight, 1};
+
+ ++copy.count;
+
+ // Update offset to copy to the last row.
+ currentOffset += (copyBlockRowCount - 1) * bytesPerRow;
+ }
+
+        // Copy the last row with the exact number of bytes in that row, working around the
+        // issue in the same way as a copy to a 1D texture.
+ uint32_t lastRowDataSize =
+ (copyExtent.width / textureFormat.blockWidth) * textureFormat.blockByteSize;
+ uint32_t lastRowCopyExtentHeight =
+ textureFormat.blockHeight + clampedCopyExtent.height - copyExtent.height;
+ ASSERT(lastRowCopyExtentHeight <= textureFormat.blockHeight);
+
+ copy.copies[copy.count].bufferOffset = currentOffset;
+ copy.copies[copy.count].bytesPerRow = lastRowDataSize;
+ copy.copies[copy.count].bytesPerImage = lastRowDataSize;
+ copy.copies[copy.count].textureOrigin = {
+ origin.x, origin.y + copyExtent.height - textureFormat.blockHeight,
+ origin.z + copyExtent.depth - 1};
+ copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, lastRowCopyExtentHeight, 1};
+ ++copy.count;
+
+ return copy;
+ }
+
+ void EnsureDestinationTextureInitialized(Texture* texture,
+ const TextureCopy& dst,
+ const Extent3D& size) {
+ ASSERT(texture == dst.texture.Get());
+ SubresourceRange range = GetSubresourcesAffectedByCopy(dst, size);
+ if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), size, dst.mipLevel)) {
+ texture->SetIsSubresourceContentInitialized(true, range);
+ } else {
+ texture->EnsureSubresourceContentInitialized(range);
+ }
+ }
+
+ MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect) {
+ ASSERT(HasOneBit(aspect));
+ ASSERT(format.aspects & aspect);
+
+ constexpr Aspect kDepthStencil = Aspect::Depth | Aspect::Stencil;
+ if ((format.aspects & kDepthStencil) == kDepthStencil) {
+ // We only provide a blit option if the format has both depth and stencil.
+ // It is invalid to provide a blit option otherwise.
+ switch (aspect) {
+ case Aspect::Depth:
+ return MTLBlitOptionDepthFromDepthStencil;
+ case Aspect::Stencil:
+ return MTLBlitOptionStencilFromDepthStencil;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return MTLBlitOptionNone;
+ }
+
}} // namespace dawn_native::metal
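To make the workaround trigger concrete, here is a worked example with illustrative numbers: a 7x5 RGBA8 copy has 28 data bytes per row; with bytesPerRow = 32, a just-big-enough buffer holds 4 padded rows plus one tight row = 156 bytes, while Metal's conservative check wants bytesPerImage * depth = 160 bytes:

    #include <cstdint>

    // Simplified form of the check in ComputeTextureBufferCopySplit
    // (blockHeight folded into rowsPerImage for brevity).
    bool NeedsSplitWorkaround(uint64_t bufferSize, uint64_t bufferOffset,
                              uint32_t bytesPerRow, uint32_t rowsPerImage,
                              uint32_t depth) {
        const uint64_t bytesPerImage = uint64_t(bytesPerRow) * rowsPerImage;
        return bufferSize - bufferOffset < bytesPerImage * depth;
    }

    // NeedsSplitWorkaround(156, 0, 32, 5, 1) == true: Metal wants 160 > 156,
    // so the last row is copied separately with a tight 28-byte stride.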
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
index adc44f0974c..d382bb8874a 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.cpp
@@ -100,9 +100,9 @@ namespace dawn_native { namespace null {
const BindGroupLayoutDescriptor* descriptor) {
return new BindGroupLayout(this, descriptor);
}
- ResultOrError<BufferBase*> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+ ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
DAWN_TRY(IncrementMemoryUsage(descriptor->size));
- return new Buffer(this, descriptor);
+ return AcquireRef(new Buffer(this, descriptor));
}
CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) {
@@ -197,6 +197,10 @@ namespace dawn_native { namespace null {
BufferBase* destination,
uint64_t destinationOffset,
uint64_t size) {
+ if (IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+ destination->SetIsDataInitialized();
+ }
+
auto operation = std::make_unique<CopyFromStagingToBufferOperation>();
operation->staging = source;
operation->destination = ToBackend(destination);
@@ -209,8 +213,8 @@ namespace dawn_native { namespace null {
return {};
}
- MaybeError Device::IncrementMemoryUsage(size_t bytes) {
- static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max() / 2, "");
+ MaybeError Device::IncrementMemoryUsage(uint64_t bytes) {
+ static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max(), "");
if (bytes > kMaxMemoryUsage || mMemoryUsage + bytes > kMaxMemoryUsage) {
return DAWN_OUT_OF_MEMORY_ERROR("Out of memory.");
}
@@ -218,7 +222,7 @@ namespace dawn_native { namespace null {
return {};
}
- void Device::DecrementMemoryUsage(size_t bytes) {
+ void Device::DecrementMemoryUsage(uint64_t bytes) {
ASSERT(mMemoryUsage >= bytes);
mMemoryUsage -= bytes;
}
@@ -266,17 +270,6 @@ namespace dawn_native { namespace null {
// Buffer
- struct BufferMapOperation : PendingOperation {
- virtual void Execute() {
- buffer->OnMapCommandSerialFinished(serial, isWrite);
- }
-
- Ref<Buffer> buffer;
- void* ptr;
- uint32_t serial;
- bool isWrite;
- };
-
Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
: BufferBase(device, descriptor) {
mBackingData = std::unique_ptr<uint8_t[]>(new uint8_t[GetSize()]);
@@ -287,14 +280,13 @@ namespace dawn_native { namespace null {
ToBackend(GetDevice())->DecrementMemoryUsage(GetSize());
}
- bool Buffer::IsMapWritable() const {
+ bool Buffer::IsMappableAtCreation() const {
// Only return true for mappable buffers so we can test cases that need / don't need a
// staging buffer.
return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
}
- MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
- *mappedPointer = mBackingData.get();
+ MaybeError Buffer::MapAtCreationImpl() {
return {};
}
@@ -312,26 +304,16 @@ namespace dawn_native { namespace null {
memcpy(mBackingData.get() + bufferOffset, data, size);
}
- MaybeError Buffer::MapReadAsyncImpl(uint32_t serial) {
- MapAsyncImplCommon(serial, false);
+ MaybeError Buffer::MapReadAsyncImpl() {
return {};
}
- MaybeError Buffer::MapWriteAsyncImpl(uint32_t serial) {
- MapAsyncImplCommon(serial, true);
+ MaybeError Buffer::MapWriteAsyncImpl() {
return {};
}
- void Buffer::MapAsyncImplCommon(uint32_t serial, bool isWrite) {
- ASSERT(mBackingData);
-
- auto operation = std::make_unique<BufferMapOperation>();
- operation->buffer = this;
- operation->ptr = mBackingData.get();
- operation->serial = serial;
- operation->isWrite = isWrite;
-
- ToBackend(GetDevice())->AddPendingOperation(std::move(operation));
+ MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ return {};
}
void* Buffer::GetMappedPointerImpl() {
@@ -347,11 +329,7 @@ namespace dawn_native { namespace null {
// CommandBuffer
CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
- : CommandBufferBase(encoder, descriptor), mCommands(encoder->AcquireCommands()) {
- }
-
- CommandBuffer::~CommandBuffer() {
- FreeCommands(&mCommands);
+ : CommandBufferBase(encoder, descriptor) {
}
// QuerySet
diff --git a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
index f6f361185c8..b2dccfe918e 100644
--- a/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
+++ b/chromium/third_party/dawn/src/dawn_native/null/DeviceNull.h
@@ -106,8 +106,8 @@ namespace dawn_native { namespace null {
uint64_t destinationOffset,
uint64_t size) override;
- MaybeError IncrementMemoryUsage(size_t bytes);
- void DecrementMemoryUsage(size_t bytes);
+ MaybeError IncrementMemoryUsage(uint64_t bytes);
+ void DecrementMemoryUsage(uint64_t bytes);
private:
using DeviceBase::DeviceBase;
@@ -116,7 +116,8 @@ namespace dawn_native { namespace null {
const BindGroupDescriptor* descriptor) override;
ResultOrError<BindGroupLayoutBase*> CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) override;
- ResultOrError<BufferBase*> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+ ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+ const BufferDescriptor* descriptor) override;
ResultOrError<ComputePipelineBase*> CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) override;
ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
@@ -147,7 +148,7 @@ namespace dawn_native { namespace null {
std::vector<std::unique_ptr<PendingOperation>> mPendingOperations;
- static constexpr size_t kMaxMemoryUsage = 256 * 1024 * 1024;
+ static constexpr uint64_t kMaxMemoryUsage = 256 * 1024 * 1024;
size_t mMemoryUsage = 0;
};
@@ -198,14 +199,14 @@ namespace dawn_native { namespace null {
~Buffer() override;
// Dawn API
- MaybeError MapReadAsyncImpl(uint32_t serial) override;
- MaybeError MapWriteAsyncImpl(uint32_t serial) override;
+ MaybeError MapReadAsyncImpl() override;
+ MaybeError MapWriteAsyncImpl() override;
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
void UnmapImpl() override;
void DestroyImpl() override;
- bool IsMapWritable() const override;
- MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
- void MapAsyncImplCommon(uint32_t serial, bool isWrite);
+ bool IsMappableAtCreation() const override;
+ MaybeError MapAtCreationImpl() override;
void* GetMappedPointerImpl() override;
std::unique_ptr<uint8_t[]> mBackingData;
@@ -215,10 +216,6 @@ namespace dawn_native { namespace null {
public:
CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
- private:
- ~CommandBuffer() override;
-
- CommandIterator mCommands;
};
class QuerySet final : public QuerySetBase {
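The widened memory accounting reduces to the following sketch, a simplification of the null backend's budget check; rejecting bytes > kMaxMemoryUsage first keeps the subsequent addition from overflowing for a budget this small:

    #include <cstdint>

    constexpr uint64_t kMaxMemoryUsage = 256 * 1024 * 1024;  // 256 MiB

    // Returns false where the real code reports DAWN_OUT_OF_MEMORY_ERROR.
    bool TryIncrement(uint64_t& usage, uint64_t bytes) {
        if (bytes > kMaxMemoryUsage || usage + bytes > kMaxMemoryUsage) {
            return false;  // would exceed the budget
        }
        usage += bytes;
        return true;
    }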
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp
index 5ce73d88b54..2d3685980ec 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BackendGL.cpp
@@ -162,6 +162,7 @@ namespace dawn_native { namespace opengl {
mFunctions.Enable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
mFunctions.Enable(GL_MULTISAMPLE);
mFunctions.Enable(GL_FRAMEBUFFER_SRGB);
+ mFunctions.Enable(GL_SAMPLE_MASK);
mPCIInfo.name = reinterpret_cast<const char*>(mFunctions.GetString(GL_RENDERER));
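Enabling GL_SAMPLE_MASK only arms the capability; the actual mask is supplied per draw with glSampleMaski, roughly as sketched below (assumes a GLES 3.1 or desktop GL 3.2+ header and loader rather than Dawn's OpenGLFunctions table):

    #include <GLES3/gl31.h>  // assumed header; desktop GL uses glcorearb.h

    // Word 0 of the sample mask covers samples 0..31, which is enough for
    // all sample counts Dawn supports.
    void ApplySampleMask(GLbitfield mask) {
        glSampleMaski(0, mask);
    }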
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp
index 7e91a4940cd..6289c6edb05 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/opengl/BufferGL.h"
+#include "dawn_native/CommandBuffer.h"
#include "dawn_native/opengl/DeviceGL.h"
namespace dawn_native { namespace opengl {
@@ -24,7 +25,7 @@ namespace dawn_native { namespace opengl {
: BufferBase(device, descriptor) {
// TODO(cwallez@chromium.org): Have a global "zero" buffer instead of creating a new 4-byte
// buffer?
- uint64_t size = std::max(GetSize(), uint64_t(4u));
+ uint64_t size = GetAppliedSize();
device->gl.GenBuffers(1, &mBuffer);
device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
@@ -45,22 +46,84 @@ namespace dawn_native { namespace opengl {
return mBuffer;
}
- bool Buffer::IsMapWritable() const {
+ uint64_t Buffer::GetAppliedSize() const {
+ // TODO(cwallez@chromium.org): Have a global "zero" buffer instead of creating a new 4-byte
+ // buffer?
+ return std::max(GetSize(), uint64_t(4u));
+ }
+
+ void Buffer::EnsureDataInitialized() {
+ // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+ // instead when buffer lazy initialization is completely supported.
+ if (IsDataInitialized() ||
+ !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+ return;
+ }
+
+ InitializeToZero();
+ }
+
+ void Buffer::EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size) {
+ // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+ // instead when buffer lazy initialization is completely supported.
+ if (IsDataInitialized() ||
+ !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+ return;
+ }
+
+ if (IsFullBufferRange(offset, size)) {
+ SetIsDataInitialized();
+ } else {
+ InitializeToZero();
+ }
+ }
+
+ void Buffer::EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy) {
+ // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+ // instead when buffer lazy initialization is completely supported.
+ if (IsDataInitialized() ||
+ !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+ return;
+ }
+
+ if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+ SetIsDataInitialized();
+ } else {
+ InitializeToZero();
+ }
+ }
+
+ void Buffer::InitializeToZero() {
+ ASSERT(GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse));
+ ASSERT(!IsDataInitialized());
+
+ const uint64_t size = GetAppliedSize();
+ Device* device = ToBackend(GetDevice());
+
+ const std::vector<uint8_t> clearValues(size, 0u);
+ device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+ device->gl.BufferSubData(GL_ARRAY_BUFFER, 0, size, clearValues.data());
+ device->IncrementLazyClearCountForTesting();
+
+ SetIsDataInitialized();
+ }
+
+ bool Buffer::IsMappableAtCreation() const {
// TODO(enga): All buffers in GL can be mapped. Investigate if mapping them will cause the
// driver to migrate it to shared memory.
return true;
}
- MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
- const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ MaybeError Buffer::MapAtCreationImpl() {
+ EnsureDataInitialized();
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
mMappedData = gl.MapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
- *mappedPointer = reinterpret_cast<uint8_t*>(mMappedData);
return {};
}
- MaybeError Buffer::MapReadAsyncImpl(uint32_t serial) {
+ MaybeError Buffer::MapReadAsyncImpl() {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
// TODO(cwallez@chromium.org): this does GPU->CPU synchronization, we could require a high
@@ -70,7 +133,7 @@ namespace dawn_native { namespace opengl {
return {};
}
- MaybeError Buffer::MapWriteAsyncImpl(uint32_t serial) {
+ MaybeError Buffer::MapWriteAsyncImpl() {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
// TODO(cwallez@chromium.org): this does GPU->CPU synchronization, we could require a high
@@ -80,7 +143,39 @@ namespace dawn_native { namespace opengl {
return {};
}
+ MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+        // It is an error to map an empty range in OpenGL. We always have at least a 4-byte
+        // buffer, so we extend an empty range to 4 bytes.
+ if (size == 0) {
+ if (offset != 0) {
+ offset -= 4;
+ }
+ size = 4;
+ }
+
+ EnsureDataInitialized();
+
+ // TODO(cwallez@chromium.org): this does GPU->CPU synchronization, we could require a high
+ // version of OpenGL that would let us map the buffer unsynchronized.
+ gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+ void* mappedData = nullptr;
+ if (mode & wgpu::MapMode::Read) {
+ mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_READ_BIT);
+ } else {
+ ASSERT(mode & wgpu::MapMode::Write);
+ mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_WRITE_BIT);
+ }
+
+        // The frontend expects the pointer returned by GetMappedPointerImpl to point at the
+        // start of the resource, but OpenGL gives us a pointer at the mapped offset, so we
+        // subtract the offset here.
+ mMappedData = static_cast<uint8_t*>(mappedData) - offset;
+ return {};
+ }
+
void* Buffer::GetMappedPointerImpl() {
+ // The mapping offset has already been removed.
return mMappedData;
}
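The offset bookkeeping shared by MapAsyncImpl and GetMappedPointerImpl reduces to this pattern; a sketch, not the actual Dawn class:

    #include <cstddef>
    #include <cstdint>

    // glMapBufferRange returns a pointer at `offset`; the offset is subtracted
    // once at map time so the stored pointer addresses the start of the
    // resource and can be returned as-is afterwards.
    struct MappedState {
        void* data = nullptr;  // start-of-resource pointer

        void OnMapped(void* mappedAtOffset, size_t offset) {
            data = static_cast<uint8_t*>(mappedAtOffset) - offset;
        }
        void* GetMappedPointer() const {
            return data;  // mapping offset already removed
        }
    };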
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.h
index 9949829a4be..8fcc7fcc346 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/BufferGL.h
@@ -29,17 +29,25 @@ namespace dawn_native { namespace opengl {
GLuint GetHandle() const;
+ void EnsureDataInitialized();
+ void EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size);
+ void EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy);
+
private:
~Buffer() override;
// Dawn API
- MaybeError MapReadAsyncImpl(uint32_t serial) override;
- MaybeError MapWriteAsyncImpl(uint32_t serial) override;
+ MaybeError MapReadAsyncImpl() override;
+ MaybeError MapWriteAsyncImpl() override;
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
void UnmapImpl() override;
void DestroyImpl() override;
- bool IsMapWritable() const override;
- MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
+ bool IsMappableAtCreation() const override;
+ MaybeError MapAtCreationImpl() override;
void* GetMappedPointerImpl() override;
+ uint64_t GetAppliedSize() const;
+
+ void InitializeToZero();
GLuint mBuffer = 0;
void* mMappedData = nullptr;
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
index 28c606adeac..be6b58d9d8e 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.cpp
@@ -442,14 +442,10 @@ namespace dawn_native { namespace opengl {
} // namespace
CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
- : CommandBufferBase(encoder, descriptor), mCommands(encoder->AcquireCommands()) {
+ : CommandBufferBase(encoder, descriptor) {
}
- CommandBuffer::~CommandBuffer() {
- FreeCommands(&mCommands);
- }
-
- void CommandBuffer::Execute() {
+ MaybeError CommandBuffer::Execute() {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
auto TransitionForPass = [](const PassResourceUsage& usages) {
@@ -462,6 +458,10 @@ namespace dawn_native { namespace opengl {
texture->EnsureSubresourceContentInitialized(texture->GetAllSubresources());
}
}
+
+ for (BufferBase* bufferBase : usages.buffers) {
+ ToBackend(bufferBase)->EnsureDataInitialized();
+ }
};
const std::vector<PassResourceUsage>& passResourceUsages = GetResourceUsages().perPass;
@@ -493,6 +493,10 @@ namespace dawn_native { namespace opengl {
case Command::CopyBufferToBuffer: {
CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+ ToBackend(copy->source)->EnsureDataInitialized();
+ ToBackend(copy->destination)
+ ->EnsureDataInitializedAsDestination(copy->destinationOffset, copy->size);
+
gl.BindBuffer(GL_PIXEL_PACK_BUFFER, ToBackend(copy->source)->GetHandle());
gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER,
ToBackend(copy->destination)->GetHandle());
@@ -514,12 +518,17 @@ namespace dawn_native { namespace opengl {
GLenum target = texture->GetGLTarget();
const GLFormat& format = texture->GetGLFormat();
+ if (dst.aspect == Aspect::Stencil) {
+ return DAWN_VALIDATION_ERROR(
+ "Copies to stencil textures unsupported on OpenGL");
+ }
+ ASSERT(dst.aspect == Aspect::Color);
+
+ buffer->EnsureDataInitialized();
+
ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- // TODO(jiawei.shao@intel.com): use copy->destination.origin.z instead of
- // copy->destination.arrayLayer once GPUTextureCopyView.arrayLayer to
- // GPUTextureCopyView.origin.z is done.
- SubresourceRange subresources = {dst.mipLevel, 1, dst.arrayLayer,
- copy->copySize.depth};
+ SubresourceRange subresources =
+ GetSubresourcesAffectedByCopy(dst, copy->copySize);
if (IsCompleteSubresourceCopiedTo(texture, copySize, dst.mipLevel)) {
texture->SetIsSubresourceContentInitialized(true, subresources);
} else {
@@ -545,12 +554,12 @@ namespace dawn_native { namespace opengl {
ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
uint64_t copyDataSize = (copySize.width / formatInfo.blockWidth) *
(copySize.height / formatInfo.blockHeight) *
- formatInfo.blockByteSize;
+ formatInfo.blockByteSize * copySize.depth;
Extent3D copyExtent = ComputeTextureCopyExtent(dst, copySize);
if (texture->GetArrayLayers() > 1) {
gl.CompressedTexSubImage3D(
- target, dst.mipLevel, dst.origin.x, dst.origin.y, dst.arrayLayer,
+ target, dst.mipLevel, dst.origin.x, dst.origin.y, dst.origin.z,
copyExtent.width, copyExtent.height, copyExtent.depth,
format.internalFormat, copyDataSize,
reinterpret_cast<void*>(static_cast<uintptr_t>(src.offset)));
@@ -565,7 +574,7 @@ namespace dawn_native { namespace opengl {
case wgpu::TextureDimension::e2D:
if (texture->GetArrayLayers() > 1) {
gl.TexSubImage3D(target, dst.mipLevel, dst.origin.x,
- dst.origin.y, dst.arrayLayer, copySize.width,
+ dst.origin.y, dst.origin.z, copySize.width,
copySize.height, copySize.depth, format.format,
format.type,
reinterpret_cast<void*>(
@@ -598,19 +607,21 @@ namespace dawn_native { namespace opengl {
auto& copySize = copy->copySize;
Texture* texture = ToBackend(src.texture.Get());
Buffer* buffer = ToBackend(dst.buffer.Get());
- const Format& format = texture->GetFormat();
- const GLFormat& glFormat = texture->GetGLFormat();
+ const Format& formatInfo = texture->GetFormat();
+ const GLFormat& format = texture->GetGLFormat();
GLenum target = texture->GetGLTarget();
// TODO(jiawei.shao@intel.com): support texture-to-buffer copy with compressed
// texture formats.
- if (format.isCompressed) {
+ if (formatInfo.isCompressed) {
UNREACHABLE();
}
+ buffer->EnsureDataInitializedAsDestination(copy);
+
ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- SubresourceRange subresources = {src.mipLevel, 1, src.arrayLayer,
- copy->copySize.depth};
+ SubresourceRange subresources =
+ GetSubresourcesAffectedByCopy(src, copy->copySize);
texture->EnsureSubresourceContentInitialized(subresources);
// The only way to move data from a texture to a buffer in GL is via
// glReadPixels with a pack buffer. Create a temporary FBO for the copy.
@@ -620,29 +631,36 @@ namespace dawn_native { namespace opengl {
gl.GenFramebuffers(1, &readFBO);
gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
- GLenum glAttachment = 0;
- switch (format.aspect) {
- case Format::Aspect::Color:
+ const TexelBlockInfo& blockInfo = formatInfo.GetTexelBlockInfo(src.aspect);
+
+ gl.BindBuffer(GL_PIXEL_PACK_BUFFER, buffer->GetHandle());
+ gl.PixelStorei(GL_PACK_IMAGE_HEIGHT, dst.rowsPerImage);
+ gl.PixelStorei(GL_PACK_ROW_LENGTH, dst.bytesPerRow / blockInfo.blockByteSize);
+
+ GLenum glAttachment;
+ GLenum glFormat;
+ GLenum glType;
+ switch (src.aspect) {
+ case Aspect::Color:
glAttachment = GL_COLOR_ATTACHMENT0;
+ glFormat = format.format;
+ glType = format.type;
break;
- case Format::Aspect::Depth:
+ case Aspect::Depth:
glAttachment = GL_DEPTH_ATTACHMENT;
+ glFormat = GL_DEPTH_COMPONENT;
+ glType = GL_FLOAT;
break;
- case Format::Aspect::Stencil:
+ case Aspect::Stencil:
glAttachment = GL_STENCIL_ATTACHMENT;
- break;
- case Format::Aspect::DepthStencil:
- glAttachment = GL_DEPTH_STENCIL_ATTACHMENT;
+ glFormat = GL_STENCIL_INDEX;
+ glType = GL_UNSIGNED_BYTE;
break;
default:
UNREACHABLE();
break;
}
- gl.BindBuffer(GL_PIXEL_PACK_BUFFER, buffer->GetHandle());
- gl.PixelStorei(GL_PACK_ROW_LENGTH, dst.bytesPerRow / format.blockByteSize);
- gl.PixelStorei(GL_PACK_IMAGE_HEIGHT, dst.rowsPerImage);
-
uint8_t* offset =
reinterpret_cast<uint8_t*>(static_cast<uintptr_t>(dst.offset));
switch (texture->GetDimension()) {
@@ -651,8 +669,7 @@ namespace dawn_native { namespace opengl {
gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, target,
texture->GetHandle(), src.mipLevel);
gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
- copySize.height, glFormat.format, glFormat.type,
- offset);
+ copySize.height, glFormat, glType, offset);
break;
}
@@ -660,10 +677,9 @@ namespace dawn_native { namespace opengl {
for (uint32_t layer = 0; layer < copySize.depth; ++layer) {
gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment,
texture->GetHandle(), src.mipLevel,
- src.arrayLayer + layer);
+ src.origin.z + layer);
gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
- copySize.height, glFormat.format, glFormat.type,
- offset);
+ copySize.height, glFormat, glType, offset);
offset += bytesPerImage;
}
@@ -696,10 +712,9 @@ namespace dawn_native { namespace opengl {
Extent3D copySize = ComputeTextureCopyExtent(dst, copy->copySize);
Texture* srcTexture = ToBackend(src.texture.Get());
Texture* dstTexture = ToBackend(dst.texture.Get());
- SubresourceRange srcRange = {src.mipLevel, 1, src.arrayLayer,
- copy->copySize.depth};
- SubresourceRange dstRange = {dst.mipLevel, 1, dst.arrayLayer,
- copy->copySize.depth};
+
+ SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
+ SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
srcTexture->EnsureSubresourceContentInitialized(srcRange);
if (IsCompleteSubresourceCopiedTo(dstTexture, copySize, dst.mipLevel)) {
@@ -708,19 +723,33 @@ namespace dawn_native { namespace opengl {
dstTexture->EnsureSubresourceContentInitialized(dstRange);
}
gl.CopyImageSubData(srcTexture->GetHandle(), srcTexture->GetGLTarget(),
- src.mipLevel, src.origin.x, src.origin.y, src.arrayLayer,
+ src.mipLevel, src.origin.x, src.origin.y, src.origin.z,
dstTexture->GetHandle(), dstTexture->GetGLTarget(),
- dst.mipLevel, dst.origin.x, dst.origin.y, dst.arrayLayer,
+ dst.mipLevel, dst.origin.x, dst.origin.y, dst.origin.z,
copySize.width, copySize.height, copy->copySize.depth);
break;
}
+ case Command::ResolveQuerySet: {
+ // TODO(hao.x.li@intel.com): Resolve non-precise occlusion query.
+ SkipCommand(&mCommands, type);
+ break;
+ }
+
+ case Command::WriteTimestamp: {
+ // WriteTimestamp is not supported on OpenGL
+ UNREACHABLE();
+ break;
+ }
+
default: {
UNREACHABLE();
break;
}
}
}
+
+ return {};
}
void CommandBuffer::ExecuteComputePass() {
@@ -789,6 +818,12 @@ namespace dawn_native { namespace opengl {
break;
}
+ case Command::WriteTimestamp: {
+ // WriteTimestamp is not supported on OpenGL
+ UNREACHABLE();
+ break;
+ }
+
default: {
UNREACHABLE();
break;
@@ -853,19 +888,14 @@ namespace dawn_native { namespace opengl {
GLenum glAttachment = 0;
// TODO(kainino@chromium.org): it may be valid to just always use
// GL_DEPTH_STENCIL_ATTACHMENT here.
- switch (format.aspect) {
- case Format::Aspect::Depth:
- glAttachment = GL_DEPTH_ATTACHMENT;
- break;
- case Format::Aspect::Stencil:
- glAttachment = GL_STENCIL_ATTACHMENT;
- break;
- case Format::Aspect::DepthStencil:
- glAttachment = GL_DEPTH_STENCIL_ATTACHMENT;
- break;
- default:
- UNREACHABLE();
- break;
+ if (format.aspects == (Aspect::Depth | Aspect::Stencil)) {
+ glAttachment = GL_DEPTH_STENCIL_ATTACHMENT;
+ } else if (format.aspects == Aspect::Depth) {
+ glAttachment = GL_DEPTH_ATTACHMENT;
+ } else if (format.aspects == Aspect::Stencil) {
+ glAttachment = GL_STENCIL_ATTACHMENT;
+ } else {
+ UNREACHABLE();
}
if (textureView->GetTexture()->GetArrayLayers() == 1) {
@@ -897,13 +927,23 @@ namespace dawn_native { namespace opengl {
auto* attachmentInfo = &renderPass->colorAttachments[i];
// Load op - color
- // TODO(cwallez@chromium.org): Choose the clear function depending on the
- // componentType: things work for now because the clear color is always a float, but
- // when that's fixed will lose precision on integer formats when converting to
- // float.
if (attachmentInfo->loadOp == wgpu::LoadOp::Clear) {
gl.ColorMaski(i, true, true, true, true);
- gl.ClearBufferfv(GL_COLOR, i, &attachmentInfo->clearColor.r);
+
+ const Format& attachmentFormat = attachmentInfo->view->GetFormat();
+ if (attachmentFormat.HasComponentType(Format::Type::Float)) {
+ gl.ClearBufferfv(GL_COLOR, i, &attachmentInfo->clearColor.r);
+ } else if (attachmentFormat.HasComponentType(Format::Type::Uint)) {
+ const std::array<uint32_t, 4> appliedClearColor =
+ ConvertToUnsignedIntegerColor(attachmentInfo->clearColor);
+ gl.ClearBufferuiv(GL_COLOR, i, appliedClearColor.data());
+ } else if (attachmentFormat.HasComponentType(Format::Type::Sint)) {
+ const std::array<int32_t, 4> appliedClearColor =
+ ConvertToSignedIntegerColor(attachmentInfo->clearColor);
+ gl.ClearBufferiv(GL_COLOR, i, appliedClearColor.data());
+ } else {
+ UNREACHABLE();
+ }
}
if (attachmentInfo->storeOp == wgpu::StoreOp::Clear) {
@@ -1140,6 +1180,12 @@ namespace dawn_native { namespace opengl {
break;
}
+ case Command::WriteTimestamp: {
+ // WriteTimestamp is not supported on OpenGL
+ UNREACHABLE();
+ break;
+ }
+
default: {
DoRenderBundleCommand(&mCommands, type);
break;
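
The integer clear path added above selects ClearBufferfv/ClearBufferuiv/ClearBufferiv by the attachment's component type, using ConvertToUnsignedIntegerColor and ConvertToSignedIntegerColor helpers that are not shown in this diff. A minimal sketch of what such helpers could look like, assuming they simply cast each double channel of the WebGPU clear color to the attachment's integer component type (the real Dawn helpers may clamp or round differently):

#include <array>
#include <cstdint>

// Hypothetical stand-in for dawn_native's Color: double RGBA channels.
struct ColorSketch {
    double r, g, b, a;
};

// GL_COLOR clears of Uint/Sint formats take raw integer values, so the
// double-valued clear color must be converted before ClearBufferuiv/iv.
std::array<uint32_t, 4> ConvertToUnsignedIntegerColorSketch(const ColorSketch& c) {
    return {static_cast<uint32_t>(c.r), static_cast<uint32_t>(c.g),
            static_cast<uint32_t>(c.b), static_cast<uint32_t>(c.a)};
}

std::array<int32_t, 4> ConvertToSignedIntegerColorSketch(const ColorSketch& c) {
    return {static_cast<int32_t>(c.r), static_cast<int32_t>(c.g),
            static_cast<int32_t>(c.b), static_cast<int32_t>(c.a)};
}
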
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h
index 482f10d09d4..b860be8c845 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/CommandBufferGL.h
@@ -15,7 +15,6 @@
#ifndef DAWNNATIVE_OPENGL_COMMANDBUFFERGL_H_
#define DAWNNATIVE_OPENGL_COMMANDBUFFERGL_H_
-#include "dawn_native/CommandAllocator.h"
#include "dawn_native/CommandBuffer.h"
namespace dawn_native {
@@ -30,14 +29,11 @@ namespace dawn_native { namespace opengl {
public:
CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
- void Execute();
+ MaybeError Execute();
private:
- ~CommandBuffer() override;
void ExecuteComputePass();
void ExecuteRenderPass(BeginRenderPassCmd* renderPass);
-
- CommandIterator mCommands;
};
}} // namespace dawn_native::opengl
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
index f47474f2aef..2f06d85da9b 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.cpp
@@ -102,8 +102,8 @@ namespace dawn_native { namespace opengl {
const BindGroupLayoutDescriptor* descriptor) {
return new BindGroupLayout(this, descriptor);
}
- ResultOrError<BufferBase*> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
- return new Buffer(this, descriptor);
+ ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+ return AcquireRef(new Buffer(this, descriptor));
}
CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
const CommandBufferDescriptor* descriptor) {
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
index 4a03f4aeaac..a297081d83c 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/DeviceGL.h
@@ -71,7 +71,8 @@ namespace dawn_native { namespace opengl {
const BindGroupDescriptor* descriptor) override;
ResultOrError<BindGroupLayoutBase*> CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) override;
- ResultOrError<BufferBase*> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+ ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+ const BufferDescriptor* descriptor) override;
ResultOrError<ComputePipelineBase*> CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) override;
ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.cpp
index 9e1a20c09bb..763f5554ddb 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/GLFormat.cpp
@@ -74,7 +74,8 @@ namespace dawn_native { namespace opengl {
// This doesn't have an enum for the internal format in OpenGL, so use RGBA8.
AddFormat(wgpu::TextureFormat::BGRA8Unorm, GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, Type::Float);
AddFormat(wgpu::TextureFormat::RGB10A2Unorm, GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, Type::Float);
- AddFormat(wgpu::TextureFormat::RG11B10Float, GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, Type::Float);
+ AddFormat(wgpu::TextureFormat::RG11B10Ufloat, GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, Type::Float);
+ AddFormat(wgpu::TextureFormat::RGB9E5Ufloat, GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, Type::Float);
// 8 bytes color formats
AddFormat(wgpu::TextureFormat::RG32Uint, GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, Type::Uint);
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h
index 7f681f94b8a..a79909ab55c 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineGL.h
@@ -36,10 +36,6 @@ namespace dawn_native { namespace opengl {
const PipelineLayout* layout,
const PerStage<const ShaderModule*>& modules);
- using BindingLocations = ityp::array<BindGroupIndex,
- ityp::array<BindingIndex, GLint, kMaxBindingsPerGroup>,
- kMaxBindGroups>;
-
// For each unit a sampler is bound to we need to know if we should use filtering or not
// because int and uint texture are only complete without filtering.
struct SamplerUnit {
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp
index 0e98c617610..8faab78b0d9 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.cpp
@@ -30,6 +30,7 @@ namespace dawn_native { namespace opengl {
for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
+ mIndexInfo[group].resize(bgl->GetBindingCount());
for (BindingIndex bindingIndex{0}; bindingIndex < bgl->GetBindingCount();
++bindingIndex) {
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.h b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.h
index 3d511d6d513..eeff7182dda 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.h
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/PipelineLayoutGL.h
@@ -18,6 +18,7 @@
#include "dawn_native/PipelineLayout.h"
#include "common/ityp_array.h"
+#include "common/ityp_vector.h"
#include "dawn_native/BindingInfo.h"
#include "dawn_native/opengl/opengl_platform.h"
@@ -30,9 +31,7 @@ namespace dawn_native { namespace opengl {
PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
using BindingIndexInfo =
- ityp::array<BindGroupIndex,
- ityp::array<BindingIndex, GLuint, kMaxBindingsPerGroup>,
- kMaxBindGroups>;
+ ityp::array<BindGroupIndex, ityp::vector<BindingIndex, GLuint>, kMaxBindGroups>;
const BindingIndexInfo& GetBindingIndexInfo() const;
GLuint GetTextureUnitsUsed() const;
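
BindingIndexInfo drops the fixed kMaxBindingsPerGroup inner array for an ityp::vector sized per bind group (see the matching mIndexInfo[group].resize(...) in PipelineLayoutGL.cpp above). A simplified sketch of the idea behind such a strongly-indexed vector, assuming only that the index type is explicitly convertible to size_t; Dawn's real ityp::vector is more complete:

#include <cstddef>
#include <cstdint>
#include <vector>

// A std::vector that only accepts a strong index type, so a BindingIndex
// cannot be confused with a BindGroupIndex at the call site.
template <typename Index, typename T>
class TypedVectorSketch {
  public:
    void resize(Index count) { mData.resize(static_cast<size_t>(count)); }
    T& operator[](Index i) { return mData[static_cast<size_t>(i)]; }
    const T& operator[](Index i) const { return mData[static_cast<size_t>(i)]; }

  private:
    std::vector<T> mData;
};

enum class BindingIndexSketch : uint32_t {};  // stand-in for Dawn's BindingIndex

void Example() {
    TypedVectorSketch<BindingIndexSketch, unsigned> info;
    info.resize(BindingIndexSketch{4});  // sized from GetBindingCount()
    info[BindingIndexSketch{0}] = 7;     // a plain integer index won't compile here
}
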
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
index a33cbd0da5f..340ec250c78 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/QueueGL.cpp
@@ -30,7 +30,7 @@ namespace dawn_native { namespace opengl {
TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
for (uint32_t i = 0; i < commandCount; ++i) {
- ToBackend(commands[i])->Execute();
+ DAWN_TRY(ToBackend(commands[i])->Execute());
}
TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
@@ -44,6 +44,8 @@ namespace dawn_native { namespace opengl {
size_t size) {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+ ToBackend(buffer)->EnsureDataInitializedAsDestination(bufferOffset, size);
+
gl.BindBuffer(GL_ARRAY_BUFFER, ToBackend(buffer)->GetHandle());
gl.BufferSubData(GL_ARRAY_BUFFER, bufferOffset, size, data);
return {};
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
index b16783ce904..7c70396f21f 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/RenderPipelineGL.cpp
@@ -43,14 +43,15 @@ namespace dawn_native { namespace opengl {
void ApplyFrontFaceAndCulling(const OpenGLFunctions& gl,
wgpu::FrontFace face,
wgpu::CullMode mode) {
+                // Note that we invert the winding direction in OpenGL because its Y axis points
+                // up, unlike in WebGPU and the other backends, where the Y axis points down.
+ GLenum direction = (face == wgpu::FrontFace::CCW) ? GL_CW : GL_CCW;
+ gl.FrontFace(direction);
+
if (mode == wgpu::CullMode::None) {
gl.Disable(GL_CULL_FACE);
} else {
gl.Enable(GL_CULL_FACE);
- // Note that we invert winding direction in OpenGL. Because Y axis is up in OpenGL,
- // which is different from WebGPU and other backends (Y axis is down).
- GLenum direction = (face == wgpu::FrontFace::CCW) ? GL_CW : GL_CCW;
- gl.FrontFace(direction);
GLenum cullMode = (mode == wgpu::CullMode::Front) ? GL_FRONT : GL_BACK;
gl.CullFace(cullMode);
@@ -257,6 +258,13 @@ namespace dawn_native { namespace opengl {
ApplyDepthStencilState(gl, GetDepthStencilStateDescriptor(), &persistentPipelineState);
+ gl.SampleMaski(0, GetSampleMask());
+ if (IsAlphaToCoverageEnabled()) {
+ gl.Enable(GL_SAMPLE_ALPHA_TO_COVERAGE);
+ } else {
+ gl.Disable(GL_SAMPLE_ALPHA_TO_COVERAGE);
+ }
+
for (uint32_t attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
ApplyColorState(gl, attachmentSlot, GetColorStateDescriptor(attachmentSlot));
}
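
The front-face hunk above moves gl.FrontFace out of the culling branch so the winding state is set even when culling is disabled (front/back stencil selection depends on it too). A self-contained sketch of the translation, assuming the Y-axis flip described in the comment; the GL_CW/GL_CCW values are standard:

// Stand-ins so the sketch compiles on its own; the real code uses
// wgpu::FrontFace and the constants from the loaded GL function table.
using GLenum = unsigned int;
constexpr GLenum kGLCW = 0x0900;   // GL_CW
constexpr GLenum kGLCCW = 0x0901;  // GL_CCW

enum class FrontFaceSketch { CCW, CW };

// WebGPU defines winding with the Y axis pointing down; OpenGL's Y axis
// points up, so the winding handed to the driver is inverted.
GLenum ToGLFrontFace(FrontFaceSketch face) {
    return (face == FrontFaceSketch::CCW) ? kGLCW : kGLCCW;
}
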
diff --git a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
index ee64bcda572..e0930d7c02a 100644
--- a/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/opengl/TextureGL.cpp
@@ -17,6 +17,7 @@
#include "common/Assert.h"
#include "common/Constants.h"
#include "common/Math.h"
+#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/opengl/BufferGL.h"
#include "dawn_native/opengl/DeviceGL.h"
#include "dawn_native/opengl/UtilsGL.h"
@@ -199,25 +200,25 @@ namespace dawn_native { namespace opengl {
float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
if (GetFormat().isRenderable) {
- if (GetFormat().HasDepthOrStencil()) {
- bool doDepthClear = GetFormat().HasDepth();
- bool doStencilClear = GetFormat().HasStencil();
+ if ((range.aspects & (Aspect::Depth | Aspect::Stencil)) != 0) {
GLfloat depth = fClearColor;
GLint stencil = clearColor;
- if (doDepthClear) {
+ if (range.aspects & Aspect::Depth) {
gl.DepthMask(GL_TRUE);
}
- if (doStencilClear) {
+ if (range.aspects & Aspect::Stencil) {
gl.StencilMask(GetStencilMaskFromStencilFormat(GetFormat().format));
}
- auto DoClear = [&]() {
- if (doDepthClear && doStencilClear) {
+ auto DoClear = [&](Aspect aspects) {
+ if (aspects == (Aspect::Depth | Aspect::Stencil)) {
gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, depth, stencil);
- } else if (doDepthClear) {
+ } else if (aspects == Aspect::Depth) {
gl.ClearBufferfv(GL_DEPTH, 0, &depth);
- } else if (doStencilClear) {
+ } else if (aspects == Aspect::Stencil) {
gl.ClearBufferiv(GL_STENCIL, 0, &stencil);
+ } else {
+ UNREACHABLE();
}
};
@@ -230,23 +231,42 @@ namespace dawn_native { namespace opengl {
switch (GetDimension()) {
case wgpu::TextureDimension::e2D:
if (GetArrayLayers() == 1) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleSubresource(level, 0))) {
- // Skip lazy clears if already initialized.
+ Aspect aspectsToClear = Aspect::None;
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, 0,
+ aspect))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+ aspectsToClear |= aspect;
+ }
+
+ if (aspectsToClear == Aspect::None) {
continue;
}
+
gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER,
GL_DEPTH_STENCIL_ATTACHMENT, GetGLTarget(),
GetHandle(), static_cast<GLint>(level));
- DoClear();
+ DoClear(aspectsToClear);
} else {
for (uint32_t layer = range.baseArrayLayer;
layer < range.baseArrayLayer + range.layerCount; ++layer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleSubresource(level, layer))) {
- // Skip lazy clears if already initialized.
+ Aspect aspectsToClear = Aspect::None;
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, layer,
+ aspect))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+ aspectsToClear |= aspect;
+ }
+
+ if (aspectsToClear == Aspect::None) {
continue;
}
@@ -254,7 +274,7 @@ namespace dawn_native { namespace opengl {
GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
GetHandle(), static_cast<GLint>(level),
static_cast<GLint>(layer));
- DoClear();
+ DoClear(aspectsToClear);
}
}
break;
@@ -266,6 +286,8 @@ namespace dawn_native { namespace opengl {
gl.DeleteFramebuffers(1, &framebuffer);
} else {
+ ASSERT(range.aspects == Aspect::Color);
+
static constexpr uint32_t MAX_TEXEL_SIZE = 16;
ASSERT(GetFormat().blockByteSize <= MAX_TEXEL_SIZE);
std::array<GLbyte, MAX_TEXEL_SIZE> clearColorData;
@@ -280,7 +302,7 @@ namespace dawn_native { namespace opengl {
layer < range.baseArrayLayer + range.layerCount; ++layer) {
if (clearValue == TextureBase::ClearValue::Zero &&
IsSubresourceContentInitialized(
- SubresourceRange::SingleSubresource(level, layer))) {
+ SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
// Skip lazy clears if already initialized.
continue;
}
@@ -292,7 +314,8 @@ namespace dawn_native { namespace opengl {
}
}
} else {
- // TODO(natlee@microsoft.com): test compressed textures are cleared
+ ASSERT(range.aspects == Aspect::Color);
+
// create temp buffer with clear color to copy to the texture image
ASSERT(kTextureBytesPerRowAlignment % GetFormat().blockByteSize == 0);
uint32_t bytesPerRow =
@@ -303,24 +326,19 @@ namespace dawn_native { namespace opengl {
ASSERT(bytesPerRow % GetFormat().blockByteSize == 0);
ASSERT(GetHeight() % GetFormat().blockHeight == 0);
- dawn_native::BufferDescriptor descriptor;
+ dawn_native::BufferDescriptor descriptor = {};
+ descriptor.mappedAtCreation = true;
+ descriptor.usage = wgpu::BufferUsage::CopySrc;
descriptor.size = bytesPerRow * (GetHeight() / GetFormat().blockHeight);
if (descriptor.size > std::numeric_limits<uint32_t>::max()) {
return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
}
- descriptor.nextInChain = nullptr;
- descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite;
+
            // TODO(natlee@microsoft.com): use Dynamic Uploader here for temp buffer
- Ref<Buffer> srcBuffer = ToBackend(device->CreateBuffer(&descriptor));
- // Call release here to prevent memory leak since CreateBuffer will up the ref count to
- // 1, then assigning to Ref<Buffer> ups the ref count to 2. Release will reduce the ref
- // count and ensure it to reach 0 when out of use.
- srcBuffer->Release();
+ Ref<Buffer> srcBuffer = AcquireRef(ToBackend(device->CreateBuffer(&descriptor)));
// Fill the buffer with clear color
- uint8_t* clearBuffer = nullptr;
- DAWN_TRY(srcBuffer->MapAtCreation(&clearBuffer));
- memset(clearBuffer, clearColor, descriptor.size);
+ memset(srcBuffer->GetMappedRange(0, descriptor.size), clearColor, descriptor.size);
srcBuffer->Unmap();
// Bind buffer and texture, and make the buffer to texture copy
@@ -339,7 +357,7 @@ namespace dawn_native { namespace opengl {
if (GetArrayLayers() == 1) {
if (clearValue == TextureBase::ClearValue::Zero &&
IsSubresourceContentInitialized(
- SubresourceRange::SingleSubresource(level, 0))) {
+ SubresourceRange::SingleMipAndLayer(level, 0, Aspect::Color))) {
// Skip lazy clears if already initialized.
continue;
}
@@ -351,7 +369,8 @@ namespace dawn_native { namespace opengl {
layer < range.baseArrayLayer + range.layerCount; ++layer) {
if (clearValue == TextureBase::ClearValue::Zero &&
IsSubresourceContentInitialized(
- SubresourceRange::SingleSubresource(level, layer))) {
+ SubresourceRange::SingleMipAndLayer(level, layer,
+ Aspect::Color))) {
// Skip lazy clears if already initialized.
continue;
}
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
index 0a68231e7b2..613f0a01260 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BackendVk.cpp
@@ -231,7 +231,17 @@ namespace dawn_native { namespace vulkan {
appInfo.applicationVersion = 0;
appInfo.pEngineName = nullptr;
appInfo.engineVersion = 0;
- appInfo.apiVersion = mGlobalInfo.apiVersion;
+            // Vulkan 1.0 implementations were required to return VK_ERROR_INCOMPATIBLE_DRIVER if
+            // apiVersion was larger than 1.0. Meanwhile, as long as the instance supports at least
+            // Vulkan 1.1, an application can use a different version of Vulkan with an instance
+            // than it does with a device or physical device. So we set apiVersion to Vulkan 1.0
+            // if the instance only supports Vulkan 1.0, and to Vulkan 1.2 otherwise, treating 1.2
+            // as the highest API version Dawn targets.
+ if (mGlobalInfo.apiVersion == VK_MAKE_VERSION(1, 0, 0)) {
+ appInfo.apiVersion = mGlobalInfo.apiVersion;
+ } else {
+ appInfo.apiVersion = VK_MAKE_VERSION(1, 2, 0);
+ }
VkInstanceCreateInfo createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
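
The apiVersion selection above reduces to a two-case rule. An equivalent self-contained formulation (a sketch; MakeVersion mirrors the VK_MAKE_VERSION encoding from vulkan.h):

#include <cstdint>

constexpr uint32_t MakeVersion(uint32_t major, uint32_t minor, uint32_t patch) {
    return (major << 22) | (minor << 12) | patch;  // VK_MAKE_VERSION layout
}

// A Vulkan 1.0 loader must be given apiVersion == 1.0, or instance creation
// fails with VK_ERROR_INCOMPATIBLE_DRIVER; any newer loader is given the
// highest version Dawn targets (1.2 at the time of this patch).
uint32_t ChooseInstanceApiVersion(uint32_t instanceApiVersion) {
    return (instanceApiVersion == MakeVersion(1, 0, 0)) ? instanceApiVersion
                                                        : MakeVersion(1, 2, 0);
}
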
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
index 1b325bee5b8..54c3dcfc40f 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupLayoutVk.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/vulkan/BindGroupLayoutVk.h"
#include "common/BitSetIterator.h"
+#include "common/ityp_vector.h"
#include "dawn_native/vulkan/BindGroupVk.h"
#include "dawn_native/vulkan/DescriptorSetAllocator.h"
#include "dawn_native/vulkan/DeviceVk.h"
@@ -85,29 +86,30 @@ namespace dawn_native { namespace vulkan {
// Compute the bindings that will be chained in the DescriptorSetLayout create info. We add
// one entry per binding set. This might be optimized by computing continuous ranges of
// bindings of the same type.
- uint32_t numBindings = 0;
- std::array<VkDescriptorSetLayoutBinding, kMaxBindingsPerGroup> bindings;
+ ityp::vector<BindingIndex, VkDescriptorSetLayoutBinding> bindings;
+ bindings.reserve(GetBindingCount());
+
for (const auto& it : GetBindingMap()) {
BindingNumber bindingNumber = it.first;
BindingIndex bindingIndex = it.second;
const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
- VkDescriptorSetLayoutBinding* vkBinding = &bindings[numBindings];
- vkBinding->binding = static_cast<uint32_t>(bindingNumber);
- vkBinding->descriptorType =
+ VkDescriptorSetLayoutBinding vkBinding;
+ vkBinding.binding = static_cast<uint32_t>(bindingNumber);
+ vkBinding.descriptorType =
VulkanDescriptorType(bindingInfo.type, bindingInfo.hasDynamicOffset);
- vkBinding->descriptorCount = 1;
- vkBinding->stageFlags = VulkanShaderStageFlags(bindingInfo.visibility);
- vkBinding->pImmutableSamplers = nullptr;
+ vkBinding.descriptorCount = 1;
+ vkBinding.stageFlags = VulkanShaderStageFlags(bindingInfo.visibility);
+ vkBinding.pImmutableSamplers = nullptr;
- numBindings++;
+ bindings.emplace_back(vkBinding);
}
VkDescriptorSetLayoutCreateInfo createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
createInfo.pNext = nullptr;
createInfo.flags = 0;
- createInfo.bindingCount = numBindings;
+ createInfo.bindingCount = static_cast<uint32_t>(bindings.size());
createInfo.pBindings = bindings.data();
Device* device = ToBackend(GetDevice());
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
index eb31182c0a4..1124e15d8fb 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BindGroupVk.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/vulkan/BindGroupVk.h"
#include "common/BitSetIterator.h"
+#include "common/ityp_stack_vec.h"
#include "dawn_native/vulkan/BindGroupLayoutVk.h"
#include "dawn_native/vulkan/BufferVk.h"
#include "dawn_native/vulkan/DeviceVk.h"
@@ -38,11 +39,15 @@ namespace dawn_native { namespace vulkan {
mDescriptorSetAllocation(descriptorSetAllocation) {
// Now do a write of a single descriptor set with all possible chained data allocated on the
// stack.
- uint32_t numWrites = 0;
- std::array<VkWriteDescriptorSet, kMaxBindingsPerGroup> writes;
- std::array<VkDescriptorBufferInfo, kMaxBindingsPerGroup> writeBufferInfo;
- std::array<VkDescriptorImageInfo, kMaxBindingsPerGroup> writeImageInfo;
+        const uint32_t bindingCount = static_cast<uint32_t>(GetLayout()->GetBindingCount());
+ ityp::stack_vec<uint32_t, VkWriteDescriptorSet, kMaxOptimalBindingsPerGroup> writes(
+ bindingCount);
+ ityp::stack_vec<uint32_t, VkDescriptorBufferInfo, kMaxOptimalBindingsPerGroup>
+ writeBufferInfo(bindingCount);
+ ityp::stack_vec<uint32_t, VkDescriptorImageInfo, kMaxOptimalBindingsPerGroup>
+ writeImageInfo(bindingCount);
+ uint32_t numWrites = 0;
for (const auto& it : GetLayout()->GetBindingMap()) {
BindingNumber bindingNumber = it.first;
BindingIndex bindingIndex = it.second;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
index 0b9385e9393..e011455348b 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.cpp
@@ -14,6 +14,7 @@
#include "dawn_native/vulkan/BufferVk.h"
+#include "dawn_native/CommandBuffer.h"
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/ResourceHeapVk.h"
@@ -116,10 +117,10 @@ namespace dawn_native { namespace vulkan {
} // namespace
// static
- ResultOrError<Buffer*> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+ ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
DAWN_TRY(buffer->Initialize());
- return buffer.Detach();
+ return std::move(buffer);
}
MaybeError Buffer::Initialize() {
@@ -166,7 +167,7 @@ namespace dawn_native { namespace vulkan {
"vkBindBufferMemory"));
if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
- ClearBuffer(device->GetPendingRecordingContext(), ClearValue::NonZero);
+ ClearBuffer(device->GetPendingRecordingContext(), 0x01010101);
}
return {};
@@ -176,14 +177,6 @@ namespace dawn_native { namespace vulkan {
DestroyInternal();
}
- void Buffer::OnMapReadCommandSerialFinished(uint32_t mapSerial, const void* data) {
- CallMapReadCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
- }
-
- void Buffer::OnMapWriteCommandSerialFinished(uint32_t mapSerial, void* data) {
- CallMapWriteCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
- }
-
VkBuffer Buffer::GetHandle() const {
return mHandle;
}
@@ -242,17 +235,22 @@ namespace dawn_native { namespace vulkan {
mLastUsage = usage;
}
- bool Buffer::IsMapWritable() const {
+ bool Buffer::IsMappableAtCreation() const {
// TODO(enga): Handle CPU-visible memory on UMA
return mMemoryAllocation.GetMappedPointer() != nullptr;
}
- MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
- *mappedPointer = mMemoryAllocation.GetMappedPointer();
+ MaybeError Buffer::MapAtCreationImpl() {
+ CommandRecordingContext* recordingContext =
+ ToBackend(GetDevice())->GetPendingRecordingContext();
+
+        // TODO(jiawei.shao@intel.com): initialize the mapped buffer on the CPU side.
+ EnsureDataInitialized(recordingContext);
+
return {};
}
- MaybeError Buffer::MapReadAsyncImpl(uint32_t serial) {
+ MaybeError Buffer::MapReadAsyncImpl() {
Device* device = ToBackend(GetDevice());
CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
@@ -260,7 +258,7 @@ namespace dawn_native { namespace vulkan {
return {};
}
- MaybeError Buffer::MapWriteAsyncImpl(uint32_t serial) {
+ MaybeError Buffer::MapWriteAsyncImpl() {
Device* device = ToBackend(GetDevice());
CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
@@ -268,6 +266,23 @@ namespace dawn_native { namespace vulkan {
return {};
}
+ MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+ Device* device = ToBackend(GetDevice());
+
+ CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+
+        // TODO(jiawei.shao@intel.com): initialize the mapped buffer on the CPU side.
+ EnsureDataInitialized(recordingContext);
+
+ if (mode & wgpu::MapMode::Read) {
+ TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapRead);
+ } else {
+ ASSERT(mode & wgpu::MapMode::Write);
+ TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapWrite);
+ }
+ return {};
+ }
+
void Buffer::UnmapImpl() {
// No need to do anything, we keep CPU-visible memory mapped at all time.
}
@@ -287,13 +302,61 @@ namespace dawn_native { namespace vulkan {
}
}
- void Buffer::ClearBuffer(CommandRecordingContext* recordingContext, ClearValue clearValue) {
- ASSERT(recordingContext != nullptr);
+ void Buffer::EnsureDataInitialized(CommandRecordingContext* recordingContext) {
+ // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+ // instead when buffer lazy initialization is completely supported.
+ if (IsDataInitialized() ||
+ !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+ return;
+ }
- // TODO(jiawei.shao@intel.com): support buffer lazy-initialization to 0.
- ASSERT(clearValue == BufferBase::ClearValue::NonZero);
+ InitializeToZero(recordingContext);
+ }
- constexpr uint32_t kClearBufferValue = 0x01010101;
+ void Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+ uint64_t offset,
+ uint64_t size) {
+ // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+ // instead when buffer lazy initialization is completely supported.
+ if (IsDataInitialized() ||
+ !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+ return;
+ }
+
+ if (IsFullBufferRange(offset, size)) {
+ SetIsDataInitialized();
+ } else {
+ InitializeToZero(recordingContext);
+ }
+ }
+
+ void Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+ const CopyTextureToBufferCmd* copy) {
+ // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+ // instead when buffer lazy initialization is completely supported.
+ if (IsDataInitialized() ||
+ !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+ return;
+ }
+
+ if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+ SetIsDataInitialized();
+ } else {
+ InitializeToZero(recordingContext);
+ }
+ }
+
+ void Buffer::InitializeToZero(CommandRecordingContext* recordingContext) {
+ ASSERT(GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse));
+ ASSERT(!IsDataInitialized());
+
+ ClearBuffer(recordingContext, 0u);
+ GetDevice()->IncrementLazyClearCountForTesting();
+ SetIsDataInitialized();
+ }
+
+ void Buffer::ClearBuffer(CommandRecordingContext* recordingContext, uint32_t clearValue) {
+ ASSERT(recordingContext != nullptr);
TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
@@ -301,6 +364,6 @@ namespace dawn_native { namespace vulkan {
// TODO(jiawei.shao@intel.com): find out why VK_WHOLE_SIZE doesn't work on old Windows Intel
// Vulkan drivers.
device->fn.CmdFillBuffer(recordingContext->commandBuffer, mHandle, 0, GetSize(),
- kClearBufferValue);
+ clearValue);
}
}} // namespace dawn_native::vulkan
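
The three EnsureDataInitialized* entry points above share one decision: do nothing if the buffer is already initialized or the lazy-clear toggle is off; if the pending write covers the whole buffer, just mark it initialized; otherwise zero-fill first. A condensed sketch of that decision, with BufferStateSketch standing in for the real vulkan::Buffer state:

#include <cstdint>

struct BufferStateSketch {
    bool lazyClearToggleEnabled;  // Toggle::LazyClearBufferOnFirstUse
    bool dataInitialized;
    uint64_t size;
};

// Returns true when a zero-fill (ClearBuffer with 0u, i.e. vkCmdFillBuffer)
// must be recorded before the buffer is written as a copy destination over
// [offset, offset + size).
bool NeedsZeroFillBeforeWrite(BufferStateSketch* b, uint64_t offset, uint64_t size) {
    if (b->dataInitialized || !b->lazyClearToggleEnabled) {
        return false;  // already initialized, or lazy clears disabled
    }
    if (offset == 0 && size == b->size) {
        b->dataInitialized = true;  // the write overwrites every byte anyway
        return false;
    }
    return true;  // partial write: clear to zero first
}
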
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
index 1c04870376a..14495d19d46 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/BufferVk.h
@@ -28,10 +28,8 @@ namespace dawn_native { namespace vulkan {
class Buffer final : public BufferBase {
public:
- static ResultOrError<Buffer*> Create(Device* device, const BufferDescriptor* descriptor);
-
- void OnMapReadCommandSerialFinished(uint32_t mapSerial, const void* data);
- void OnMapWriteCommandSerialFinished(uint32_t mapSerial, void* data);
+ static ResultOrError<Ref<Buffer>> Create(Device* device,
+ const BufferDescriptor* descriptor);
VkBuffer GetHandle() const;
@@ -45,20 +43,29 @@ namespace dawn_native { namespace vulkan {
VkPipelineStageFlags* srcStages,
VkPipelineStageFlags* dstStages);
+ void EnsureDataInitialized(CommandRecordingContext* recordingContext);
+ void EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+ uint64_t offset,
+ uint64_t size);
+ void EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+ const CopyTextureToBufferCmd* copy);
+
private:
~Buffer() override;
using BufferBase::BufferBase;
MaybeError Initialize();
- void ClearBuffer(CommandRecordingContext* recordingContext, ClearValue clearValue);
+ void InitializeToZero(CommandRecordingContext* recordingContext);
+ void ClearBuffer(CommandRecordingContext* recordingContext, uint32_t clearValue);
// Dawn API
- MaybeError MapReadAsyncImpl(uint32_t serial) override;
- MaybeError MapWriteAsyncImpl(uint32_t serial) override;
+ MaybeError MapReadAsyncImpl() override;
+ MaybeError MapWriteAsyncImpl() override;
+ MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
void UnmapImpl() override;
void DestroyImpl() override;
- bool IsMapWritable() const override;
- MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
+ bool IsMappableAtCreation() const override;
+ MaybeError MapAtCreationImpl() override;
void* GetMappedPointerImpl() override;
VkBuffer mHandle = VK_NULL_HANDLE;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
index 575f66f8704..ce86612b7e9 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.cpp
@@ -68,23 +68,23 @@ namespace dawn_native { namespace vulkan {
// TODO(jiawei.shao@intel.com): support 1D and 3D textures
ASSERT(srcTexture->GetDimension() == wgpu::TextureDimension::e2D &&
dstTexture->GetDimension() == wgpu::TextureDimension::e2D);
- region.srcSubresource.aspectMask = srcTexture->GetVkAspectMask();
+ region.srcSubresource.aspectMask = VulkanAspectMask(srcCopy.aspect);
region.srcSubresource.mipLevel = srcCopy.mipLevel;
- region.srcSubresource.baseArrayLayer = srcCopy.arrayLayer;
+ region.srcSubresource.baseArrayLayer = srcCopy.origin.z;
region.srcSubresource.layerCount = copySize.depth;
region.srcOffset.x = srcCopy.origin.x;
region.srcOffset.y = srcCopy.origin.y;
- region.srcOffset.z = srcCopy.origin.z;
+ region.srcOffset.z = 0;
- region.dstSubresource.aspectMask = dstTexture->GetVkAspectMask();
+ region.dstSubresource.aspectMask = VulkanAspectMask(dstCopy.aspect);
region.dstSubresource.mipLevel = dstCopy.mipLevel;
- region.dstSubresource.baseArrayLayer = dstCopy.arrayLayer;
+ region.dstSubresource.baseArrayLayer = dstCopy.origin.z;
region.dstSubresource.layerCount = copySize.depth;
region.dstOffset.x = dstCopy.origin.x;
region.dstOffset.y = dstCopy.origin.y;
- region.dstOffset.z = dstCopy.origin.z;
+ region.dstOffset.z = 0;
ASSERT(HasSameTextureCopyExtent(srcCopy, dstCopy, copySize));
Extent3D imageExtent = ComputeTextureCopyExtent(dstCopy, copySize);
@@ -104,7 +104,7 @@ namespace dawn_native { namespace vulkan {
const ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups>& bindGroups,
const ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups>& dynamicOffsetCounts,
const ityp::array<BindGroupIndex,
- std::array<uint32_t, kMaxBindingsPerGroup>,
+ std::array<uint32_t, kMaxDynamicBuffersPerPipelineLayout>,
kMaxBindGroups>& dynamicOffsets) {
for (BindGroupIndex dirtyIndex : IterateBitSet(bindGroupsToApply)) {
VkDescriptorSet set = ToBackend(bindGroups[dirtyIndex])->GetHandle();
@@ -234,10 +234,27 @@ namespace dawn_native { namespace vulkan {
attachments[attachmentCount] = view->GetHandle();
- clearValues[attachmentCount].color.float32[0] = attachmentInfo.clearColor.r;
- clearValues[attachmentCount].color.float32[1] = attachmentInfo.clearColor.g;
- clearValues[attachmentCount].color.float32[2] = attachmentInfo.clearColor.b;
- clearValues[attachmentCount].color.float32[3] = attachmentInfo.clearColor.a;
+ const Format& attachmentFormat = view->GetFormat();
+ if (attachmentFormat.HasComponentType(Format::Type::Float)) {
+ clearValues[attachmentCount].color.float32[0] = attachmentInfo.clearColor.r;
+ clearValues[attachmentCount].color.float32[1] = attachmentInfo.clearColor.g;
+ clearValues[attachmentCount].color.float32[2] = attachmentInfo.clearColor.b;
+ clearValues[attachmentCount].color.float32[3] = attachmentInfo.clearColor.a;
+ } else if (attachmentFormat.HasComponentType(Format::Type::Uint)) {
+ const std::array<uint32_t, 4> appliedClearColor =
+ ConvertToUnsignedIntegerColor(attachmentInfo.clearColor);
+ for (uint32_t i = 0; i < 4; ++i) {
+ clearValues[attachmentCount].color.uint32[i] = appliedClearColor[i];
+ }
+ } else if (attachmentFormat.HasComponentType(Format::Type::Sint)) {
+ const std::array<int32_t, 4> appliedClearColor =
+ ConvertToSignedIntegerColor(attachmentInfo.clearColor);
+ for (uint32_t i = 0; i < 4; ++i) {
+ clearValues[attachmentCount].color.int32[i] = appliedClearColor[i];
+ }
+ } else {
+ UNREACHABLE();
+ }
attachmentCount++;
}
@@ -313,11 +330,7 @@ namespace dawn_native { namespace vulkan {
}
CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
- : CommandBufferBase(encoder, descriptor), mCommands(encoder->AcquireCommands()) {
- }
-
- CommandBuffer::~CommandBuffer() {
- FreeCommands(&mCommands);
+ : CommandBufferBase(encoder, descriptor) {
}
void CommandBuffer::RecordCopyImageWithTemporaryBuffer(
@@ -387,6 +400,7 @@ namespace dawn_native { namespace vulkan {
for (size_t i = 0; i < usages.buffers.size(); ++i) {
Buffer* buffer = ToBackend(usages.buffers[i]);
+ buffer->EnsureDataInitialized(recordingContext);
buffer->TransitionUsageNow(recordingContext, usages.bufferUsages[i],
&bufferBarriers, &srcStages, &dstStages);
}
@@ -424,6 +438,10 @@ namespace dawn_native { namespace vulkan {
Buffer* srcBuffer = ToBackend(copy->source.Get());
Buffer* dstBuffer = ToBackend(copy->destination.Get());
+ srcBuffer->EnsureDataInitialized(recordingContext);
+ dstBuffer->EnsureDataInitializedAsDestination(
+ recordingContext, copy->destinationOffset, copy->size);
+
srcBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
@@ -443,13 +461,16 @@ namespace dawn_native { namespace vulkan {
auto& src = copy->source;
auto& dst = copy->destination;
+ ToBackend(src.buffer)->EnsureDataInitialized(recordingContext);
+
VkBufferImageCopy region =
ComputeBufferImageCopyRegion(src, dst, copy->copySize);
VkImageSubresourceLayers subresource = region.imageSubresource;
ASSERT(dst.texture->GetDimension() == wgpu::TextureDimension::e2D);
- SubresourceRange range = {subresource.mipLevel, 1, subresource.baseArrayLayer,
- subresource.layerCount};
+ SubresourceRange range =
+ GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
+
if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
subresource.mipLevel)) {
// Since texture has been overwritten, it has been "initialized"
@@ -478,14 +499,16 @@ namespace dawn_native { namespace vulkan {
auto& src = copy->source;
auto& dst = copy->destination;
+ ToBackend(dst.buffer)
+ ->EnsureDataInitializedAsDestination(recordingContext, copy);
+
VkBufferImageCopy region =
ComputeBufferImageCopyRegion(dst, src, copy->copySize);
- VkImageSubresourceLayers subresource = region.imageSubresource;
ASSERT(src.texture->GetDimension() == wgpu::TextureDimension::e2D);
- const SubresourceRange range = {subresource.mipLevel, 1,
- subresource.baseArrayLayer,
- subresource.layerCount};
+ SubresourceRange range =
+ GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
+
ToBackend(src.texture)
->EnsureSubresourceContentInitialized(recordingContext, range);
@@ -507,10 +530,8 @@ namespace dawn_native { namespace vulkan {
mCommands.NextCommand<CopyTextureToTextureCmd>();
TextureCopy& src = copy->source;
TextureCopy& dst = copy->destination;
- SubresourceRange srcRange = {src.mipLevel, 1, src.arrayLayer,
- copy->copySize.depth};
- SubresourceRange dstRange = {dst.mipLevel, 1, dst.arrayLayer,
- copy->copySize.depth};
+ SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
+ SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
ToBackend(src.texture)
->EnsureSubresourceContentInitialized(recordingContext, srcRange);
@@ -528,10 +549,11 @@ namespace dawn_native { namespace vulkan {
// subresources should all be GENERAL instead of what we set now. Currently
// it is not allowed to copy with overlapped subresources, but we still
// add the ASSERT here as a reminder for this possible misuse.
- ASSERT(!IsRangeOverlapped(src.arrayLayer, dst.arrayLayer,
- copy->copySize.depth));
+ ASSERT(
+ !IsRangeOverlapped(src.origin.z, dst.origin.z, copy->copySize.depth));
}
+ // TODO after Yunchao's CL
ToBackend(src.texture)
->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc,
srcRange);
@@ -590,12 +612,20 @@ namespace dawn_native { namespace vulkan {
mCommands.NextCommand<BeginComputePassCmd>();
TransitionForPass(device, recordingContext, passResourceUsages[nextPassNumber]);
- RecordComputePass(recordingContext);
+ DAWN_TRY(RecordComputePass(recordingContext));
nextPassNumber++;
break;
}
+ case Command::ResolveQuerySet: {
+ return DAWN_UNIMPLEMENTED_ERROR("Waiting for implementation.");
+ }
+
+ case Command::WriteTimestamp: {
+ return DAWN_UNIMPLEMENTED_ERROR("Waiting for implementation.");
+ }
+
default: {
UNREACHABLE();
break;
@@ -606,7 +636,7 @@ namespace dawn_native { namespace vulkan {
return {};
}
- void CommandBuffer::RecordComputePass(CommandRecordingContext* recordingContext) {
+ MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingContext) {
Device* device = ToBackend(GetDevice());
VkCommandBuffer commands = recordingContext->commandBuffer;
@@ -617,7 +647,7 @@ namespace dawn_native { namespace vulkan {
switch (type) {
case Command::EndComputePass: {
mCommands.NextCommand<EndComputePassCmd>();
- return;
+ return {};
}
case Command::Dispatch: {
@@ -713,6 +743,10 @@ namespace dawn_native { namespace vulkan {
break;
}
+ case Command::WriteTimestamp: {
+ return DAWN_UNIMPLEMENTED_ERROR("Waiting for implementation.");
+ }
+
default: {
UNREACHABLE();
break;
@@ -982,6 +1016,10 @@ namespace dawn_native { namespace vulkan {
break;
}
+ case Command::WriteTimestamp: {
+ return DAWN_UNIMPLEMENTED_ERROR("Waiting for implementation.");
+ }
+
default: {
EncodeRenderBundleCommand(&mCommands, type);
break;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
index c60fa9278f2..c5476d38ad0 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/CommandBufferVk.h
@@ -15,7 +15,6 @@
#ifndef DAWNNATIVE_VULKAN_COMMANDBUFFERVK_H_
#define DAWNNATIVE_VULKAN_COMMANDBUFFERVK_H_
-#include "dawn_native/CommandAllocator.h"
#include "dawn_native/CommandBuffer.h"
#include "dawn_native/Error.h"
@@ -40,17 +39,14 @@ namespace dawn_native { namespace vulkan {
private:
CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
- ~CommandBuffer() override;
- void RecordComputePass(CommandRecordingContext* recordingContext);
+ MaybeError RecordComputePass(CommandRecordingContext* recordingContext);
MaybeError RecordRenderPass(CommandRecordingContext* recordingContext,
BeginRenderPassCmd* renderPass);
void RecordCopyImageWithTemporaryBuffer(CommandRecordingContext* recordingContext,
const TextureCopy& srcCopy,
const TextureCopy& dstCopy,
const Extent3D& copySize);
-
- CommandIterator mCommands;
};
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocator.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocator.cpp
index 9f1999c3a5d..a7b794ff424 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocator.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DescriptorSetAllocator.cpp
@@ -38,7 +38,6 @@ namespace dawn_native { namespace vulkan {
totalDescriptorCount += it.second;
mPoolSizes.push_back(VkDescriptorPoolSize{it.first, it.second});
}
- ASSERT(totalDescriptorCount <= kMaxBindingsPerGroup);
if (totalDescriptorCount == 0) {
// Vulkan requires that valid usage of vkCreateDescriptorPool must have a non-zero
@@ -49,6 +48,9 @@ namespace dawn_native { namespace vulkan {
mPoolSizes.push_back(VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1});
mMaxSets = kMaxDescriptorsPerPool;
} else {
+ ASSERT(totalDescriptorCount <= kMaxBindingsPerPipelineLayout);
+ static_assert(kMaxBindingsPerPipelineLayout <= kMaxDescriptorsPerPool, "");
+
// Compute the total number of descriptors sets that fits given the max.
mMaxSets = kMaxDescriptorsPerPool / totalDescriptorCount;
ASSERT(mMaxSets > 0);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
index 921dd7341c3..01ba55ce461 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.cpp
@@ -16,7 +16,6 @@
#include "common/Platform.h"
#include "dawn_native/BackendConnection.h"
-#include "dawn_native/Commands.h"
#include "dawn_native/Error.h"
#include "dawn_native/ErrorData.h"
#include "dawn_native/VulkanBackend.h"
@@ -29,6 +28,7 @@
#include "dawn_native/vulkan/ComputePipelineVk.h"
#include "dawn_native/vulkan/FencedDeleter.h"
#include "dawn_native/vulkan/PipelineLayoutVk.h"
+#include "dawn_native/vulkan/QuerySetVk.h"
#include "dawn_native/vulkan/QueueVk.h"
#include "dawn_native/vulkan/RenderPassCache.h"
#include "dawn_native/vulkan/RenderPipelineVk.h"
@@ -110,7 +110,7 @@ namespace dawn_native { namespace vulkan {
const BindGroupLayoutDescriptor* descriptor) {
return BindGroupLayout::Create(this, descriptor);
}
- ResultOrError<BufferBase*> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+ ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
return Buffer::Create(this, descriptor);
}
CommandBufferBase* Device::CreateCommandBuffer(CommandEncoder* encoder,
@@ -126,7 +126,7 @@ namespace dawn_native { namespace vulkan {
return PipelineLayout::Create(this, descriptor);
}
ResultOrError<QuerySetBase*> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
- return DAWN_UNIMPLEMENTED_ERROR("Waiting for implementation");
+ return QuerySet::Create(this, descriptor);
}
ResultOrError<RenderPipelineBase*> Device::CreateRenderPipelineImpl(
const RenderPipelineDescriptor* descriptor) {
@@ -299,6 +299,10 @@ namespace dawn_native { namespace vulkan {
// Always require fragmentStoresAndAtomics because it is required by end2end tests.
usedKnobs.features.fragmentStoresAndAtomics = VK_TRUE;
+ if (IsRobustnessEnabled()) {
+ usedKnobs.features.robustBufferAccess = VK_TRUE;
+ }
+
if (mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
ASSERT(usedKnobs.HasExt(DeviceExt::SubgroupSizeControl));
@@ -315,6 +319,12 @@ namespace dawn_native { namespace vulkan {
usedKnobs.features.textureCompressionBC = VK_TRUE;
}
+ if (IsExtensionEnabled(Extension::PipelineStatisticsQuery)) {
+ ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.pipelineStatisticsQuery ==
+ VK_TRUE);
+ usedKnobs.features.pipelineStatisticsQuery = VK_TRUE;
+ }
+
if (IsExtensionEnabled(Extension::ShaderFloat16)) {
const VulkanDeviceInfo& deviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
ASSERT(deviceInfo.HasExt(DeviceExt::ShaderFloat16Int8) &&
@@ -586,16 +596,17 @@ namespace dawn_native { namespace vulkan {
// calling this function.
ASSERT(size != 0);
- // Insert memory barrier to ensure host write operations are made visible before
- // copying from the staging buffer. However, this barrier can be removed (see note below).
- //
- // Note: Depending on the spec understanding, an explicit barrier may not be required when
- // used with HOST_COHERENT as vkQueueSubmit does an implicit barrier between host and
- // device. See "Availability, Visibility, and Domain Operations" in Vulkan spec for details.
+ CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+
+ ToBackend(destination)
+ ->EnsureDataInitializedAsDestination(recordingContext, destinationOffset, size);
+
+        // There is no need for a barrier to make host writes available and visible to the copy
+        // operation for HOST_COHERENT memory: the Vulkan spec for vkQueueSubmit specifies that it
+        // performs an implicit availability, visibility, and domain operation.
// Insert pipeline barrier to ensure correct ordering with previous memory operations on the
// buffer.
- CommandRecordingContext* recordingContext = GetPendingRecordingContext();
ToBackend(destination)->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
VkBufferCopy copy;
@@ -610,6 +621,43 @@ namespace dawn_native { namespace vulkan {
return {};
}
+ MaybeError Device::CopyFromStagingToTexture(StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels) {
+        // There is no need for a barrier to make host writes available and visible to the copy
+        // operation for HOST_COHERENT memory: the Vulkan spec for vkQueueSubmit specifies that it
+        // performs an implicit availability, visibility, and domain operation.
+
+ CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+
+ VkBufferImageCopy region = ComputeBufferImageCopyRegion(src, *dst, copySizePixels);
+ VkImageSubresourceLayers subresource = region.imageSubresource;
+
+ ASSERT(dst->texture->GetDimension() == wgpu::TextureDimension::e2D);
+ SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
+
+ if (IsCompleteSubresourceCopiedTo(dst->texture.Get(), copySizePixels,
+ subresource.mipLevel)) {
+ // Since texture has been overwritten, it has been "initialized"
+ dst->texture->SetIsSubresourceContentInitialized(true, range);
+ } else {
+ ToBackend(dst->texture)->EnsureSubresourceContentInitialized(recordingContext, range);
+ }
+ // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
+ // texture.
+ ToBackend(dst->texture)
+ ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
+ VkImage dstImage = ToBackend(dst->texture)->GetHandle();
+
+        // Dawn guarantees that dstImage is in the TRANSFER_DST_OPTIMAL layout after the
+        // copy command.
+ this->fn.CmdCopyBufferToImage(recordingContext->commandBuffer,
+ ToBackend(source)->GetBufferHandle(), dstImage,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+ return {};
+ }
+
MaybeError Device::ImportExternalImage(const ExternalImageDescriptor* descriptor,
ExternalMemoryHandle memoryHandle,
VkImage image,
@@ -620,12 +668,6 @@ namespace dawn_native { namespace vulkan {
const TextureDescriptor* textureDescriptor =
reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
- // TODO(dawn:22): Remove once migration from GPUTextureDescriptor.arrayLayerCount to
- // GPUTextureDescriptor.size.depth is done.
- TextureDescriptor fixedDescriptor;
- DAWN_TRY_ASSIGN(fixedDescriptor, FixTextureDescriptor(this, textureDescriptor));
- textureDescriptor = &fixedDescriptor;
-
// Check services support this combination of handle type / image info
if (!mExternalSemaphoreService->Supported()) {
return DAWN_VALIDATION_ERROR("External semaphore usage not supported");
@@ -812,7 +854,15 @@ namespace dawn_native { namespace vulkan {
// Immediately tag the recording context as unused so we don't try to submit it in Tick.
mRecordingContext.used = false;
- fn.DestroyCommandPool(mVkDevice, mRecordingContext.commandPool, nullptr);
+ if (mRecordingContext.commandPool != VK_NULL_HANDLE) {
+            // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
+            // destroyed, but that's not the case in some drivers, and they leak memory.
+            // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
+ // TODO(enga): Only do this on a known list of bad drivers.
+ fn.FreeCommandBuffers(mVkDevice, mRecordingContext.commandPool, 1,
+ &mRecordingContext.commandBuffer);
+ fn.DestroyCommandPool(mVkDevice, mRecordingContext.commandPool, nullptr);
+ }
for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
@@ -826,6 +876,11 @@ namespace dawn_native { namespace vulkan {
ASSERT(mCommandsInFlight.Empty());
for (const CommandPoolAndBuffer& commands : mUnusedCommands) {
+            // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
+            // destroyed, but that's not the case in some drivers, and they leak memory.
+            // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
+ // TODO(enga): Only do this on a known list of bad drivers.
+ fn.FreeCommandBuffers(mVkDevice, commands.pool, 1, &commands.commandBuffer);
fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
}
mUnusedCommands.clear();
@@ -837,8 +892,12 @@ namespace dawn_native { namespace vulkan {
// Releasing the uploader enqueues buffers to be released.
// Call Tick() again to clear them before releasing the deleter.
+ mResourceMemoryAllocator->Tick(GetCompletedCommandSerial());
mDeleter->Tick(GetCompletedCommandSerial());
+ // Allow recycled memory to be deleted.
+ mResourceMemoryAllocator->DestroyPool();
+
// The VkRenderPasses in the cache can be destroyed immediately since all commands referring
// to them are guaranteed to be finished executing.
mRenderPassCache = nullptr;
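
CopyFromStagingToTexture above reuses the lazy-initialization shortcut from the command-buffer path: a copy that overwrites a whole subresource marks it initialized, while a partial copy forces the destination to be initialized first. A small sketch of that predicate, under the usual assumption that mip level N of a 2D texture is max(1, size >> N) texels per dimension:

#include <algorithm>
#include <cstdint>

struct Extent2DSketch {
    uint32_t width;
    uint32_t height;
};

// True when the copy covers the full extent of the destination mip level,
// so every texel of the subresource is overwritten.
bool IsCompleteSubresourceCopySketch(Extent2DSketch textureSize,
                                     uint32_t mipLevel,
                                     Extent2DSketch copySize) {
    const uint32_t mipWidth = std::max(1u, textureSize.width >> mipLevel);
    const uint32_t mipHeight = std::max(1u, textureSize.height >> mipLevel);
    return copySize.width == mipWidth && copySize.height == mipHeight;
}
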
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
index 9e9ded9727c..89ec1ea8e8d 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/DeviceVk.h
@@ -19,6 +19,7 @@
#include "common/Serial.h"
#include "common/SerialQueue.h"
+#include "dawn_native/Commands.h"
#include "dawn_native/Device.h"
#include "dawn_native/vulkan/CommandRecordingContext.h"
#include "dawn_native/vulkan/Forward.h"
@@ -87,6 +88,10 @@ namespace dawn_native { namespace vulkan {
BufferBase* destination,
uint64_t destinationOffset,
uint64_t size) override;
+ MaybeError CopyFromStagingToTexture(StagingBufferBase* source,
+ const TextureDataLayout& src,
+ TextureCopy* dst,
+ const Extent3D& copySizePixels);
ResultOrError<ResourceMemoryAllocation> AllocateMemory(VkMemoryRequirements requirements,
bool mappable);
@@ -107,7 +112,8 @@ namespace dawn_native { namespace vulkan {
const BindGroupDescriptor* descriptor) override;
ResultOrError<BindGroupLayoutBase*> CreateBindGroupLayoutImpl(
const BindGroupLayoutDescriptor* descriptor) override;
- ResultOrError<BufferBase*> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+ ResultOrError<Ref<BufferBase>> CreateBufferImpl(
+ const BufferDescriptor* descriptor) override;
ResultOrError<ComputePipelineBase*> CreateComputePipelineImpl(
const ComputePipelineDescriptor* descriptor) override;
ResultOrError<PipelineLayoutBase*> CreatePipelineLayoutImpl(
@@ -194,4 +200,4 @@ namespace dawn_native { namespace vulkan {
}} // namespace dawn_native::vulkan
-#endif // DAWNNATIVE_VULKAN_DEVICEVK_H_
+#endif // DAWNNATIVE_VULKAN_DEVICEVK_H_
\ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.cpp
index 388eb93b2e5..42a42373e25 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.cpp
@@ -30,6 +30,7 @@ namespace dawn_native { namespace vulkan {
ASSERT(mMemoriesToDelete.Empty());
ASSERT(mPipelinesToDelete.Empty());
ASSERT(mPipelineLayoutsToDelete.Empty());
+ ASSERT(mQueryPoolsToDelete.Empty());
ASSERT(mRenderPassesToDelete.Empty());
ASSERT(mSamplersToDelete.Empty());
ASSERT(mSemaphoresToDelete.Empty());
@@ -70,6 +71,10 @@ namespace dawn_native { namespace vulkan {
mPipelineLayoutsToDelete.Enqueue(layout, mDevice->GetPendingCommandSerial());
}
+ void FencedDeleter::DeleteWhenUnused(VkQueryPool querypool) {
+ mQueryPoolsToDelete.Enqueue(querypool, mDevice->GetPendingCommandSerial());
+ }
+
void FencedDeleter::DeleteWhenUnused(VkRenderPass renderPass) {
mRenderPassesToDelete.Enqueue(renderPass, mDevice->GetPendingCommandSerial());
}
@@ -164,6 +169,11 @@ namespace dawn_native { namespace vulkan {
}
mDescriptorPoolsToDelete.ClearUpTo(completedSerial);
+ for (VkQueryPool pool : mQueryPoolsToDelete.IterateUpTo(completedSerial)) {
+ mDevice->fn.DestroyQueryPool(vkDevice, pool, nullptr);
+ }
+ mQueryPoolsToDelete.ClearUpTo(completedSerial);
+
for (VkSampler sampler : mSamplersToDelete.IterateUpTo(completedSerial)) {
mDevice->fn.DestroySampler(vkDevice, sampler, nullptr);
}
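
The query-pool plumbing follows the deleter's existing shape: each handle is enqueued with the serial of the pending submission that may still use it, and Tick() destroys everything whose serial the GPU has completed. A simplified sketch of that idea (standalone types; Dawn's SerialQueue is more featureful):

    #include <vulkan/vulkan.h>
    #include <cstdint>
    #include <deque>
    #include <utility>

    // Assumes serials are enqueued in monotonically increasing order, so the
    // front of the deque always holds the oldest pending deletion.
    class QueryPoolDeleter {
      public:
        void DeleteWhenUnused(VkQueryPool pool, uint64_t pendingSerial) {
            mPending.emplace_back(pool, pendingSerial);
        }

        void Tick(VkDevice device, uint64_t completedSerial) {
            // Destroy every pool whose last possible use has finished on the GPU.
            while (!mPending.empty() && mPending.front().second <= completedSerial) {
                vkDestroyQueryPool(device, mPending.front().first, nullptr);
                mPending.pop_front();
            }
        }

      private:
        std::deque<std::pair<VkQueryPool, uint64_t>> mPending;
    };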
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.h b/chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.h
index 7200eda94d6..9e516b1006c 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/FencedDeleter.h
@@ -36,6 +36,7 @@ namespace dawn_native { namespace vulkan {
void DeleteWhenUnused(VkPipelineLayout layout);
void DeleteWhenUnused(VkRenderPass renderPass);
void DeleteWhenUnused(VkPipeline pipeline);
+ void DeleteWhenUnused(VkQueryPool querypool);
void DeleteWhenUnused(VkSampler sampler);
void DeleteWhenUnused(VkSemaphore semaphore);
void DeleteWhenUnused(VkShaderModule module);
@@ -54,6 +55,7 @@ namespace dawn_native { namespace vulkan {
SerialQueue<VkImageView> mImageViewsToDelete;
SerialQueue<VkPipeline> mPipelinesToDelete;
SerialQueue<VkPipelineLayout> mPipelineLayoutsToDelete;
+ SerialQueue<VkQueryPool> mQueryPoolsToDelete;
SerialQueue<VkRenderPass> mRenderPassesToDelete;
SerialQueue<VkSampler> mSamplersToDelete;
SerialQueue<VkSemaphore> mSemaphoresToDelete;
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp
new file mode 100644
index 00000000000..0d740a63ffe
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.cpp
@@ -0,0 +1,112 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_native/vulkan/QuerySetVk.h"
+
+#include "dawn_native/vulkan/DeviceVk.h"
+#include "dawn_native/vulkan/FencedDeleter.h"
+#include "dawn_native/vulkan/VulkanError.h"
+#include "dawn_platform/DawnPlatform.h"
+
+namespace dawn_native { namespace vulkan {
+
+ namespace {
+ VkQueryType VulkanQueryType(wgpu::QueryType type) {
+ switch (type) {
+ case wgpu::QueryType::Occlusion:
+ return VK_QUERY_TYPE_OCCLUSION;
+ case wgpu::QueryType::PipelineStatistics:
+ return VK_QUERY_TYPE_PIPELINE_STATISTICS;
+ case wgpu::QueryType::Timestamp:
+ return VK_QUERY_TYPE_TIMESTAMP;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ VkQueryPipelineStatisticFlags VulkanQueryPipelineStatisticFlags(
+ std::vector<wgpu::PipelineStatisticName> pipelineStatisticsSet) {
+ VkQueryPipelineStatisticFlags pipelineStatistics = 0;
+ for (size_t i = 0; i < pipelineStatisticsSet.size(); ++i) {
+ switch (pipelineStatisticsSet[i]) {
+ case wgpu::PipelineStatisticName::ClipperInvocations:
+ pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
+ break;
+ case wgpu::PipelineStatisticName::ClipperPrimitivesOut:
+ pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT;
+ break;
+ case wgpu::PipelineStatisticName::ComputeShaderInvocations:
+ pipelineStatistics |=
+ VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT;
+ break;
+ case wgpu::PipelineStatisticName::FragmentShaderInvocations:
+ pipelineStatistics |=
+ VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT;
+ break;
+ case wgpu::PipelineStatisticName::VertexShaderInvocations:
+ pipelineStatistics |=
+ VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ return pipelineStatistics;
+ }
+ } // anonymous namespace
+
+ // static
+ ResultOrError<QuerySet*> QuerySet::Create(Device* device,
+ const QuerySetDescriptor* descriptor) {
+ Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
+ DAWN_TRY(queryset->Initialize());
+ return queryset.Detach();
+ }
+
+ MaybeError QuerySet::Initialize() {
+ VkQueryPoolCreateInfo createInfo;
+ createInfo.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.queryType = VulkanQueryType(GetQueryType());
+ createInfo.queryCount = GetQueryCount();
+ if (GetQueryType() == wgpu::QueryType::PipelineStatistics) {
+ createInfo.pipelineStatistics =
+ VulkanQueryPipelineStatisticFlags(GetPipelineStatistics());
+ }
+
+ Device* device = ToBackend(GetDevice());
+ return CheckVkOOMThenSuccess(
+ device->fn.CreateQueryPool(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+ "vkCreateQueryPool");
+ }
+
+ VkQueryPool QuerySet::GetHandle() const {
+ return mHandle;
+ }
+
+ QuerySet::~QuerySet() {
+ DestroyInternal();
+ }
+
+ void QuerySet::DestroyImpl() {
+ if (mHandle != VK_NULL_HANDLE) {
+ ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+ mHandle = VK_NULL_HANDLE;
+ }
+ }
+
+}} // namespace dawn_native::vulkan
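
Creation is only half the story; results come back through vkGetQueryPoolResults, which this new file does not exercise. A hedged usage sketch against the raw Vulkan API (64-bit results, blocking until the values are available):

    #include <vulkan/vulkan.h>
    #include <cstdint>
    #include <vector>

    // Fetch queryCount 64-bit results starting at firstQuery, waiting until
    // the GPU has written them all.
    std::vector<uint64_t> ReadQueryResults(VkDevice device, VkQueryPool pool,
                                           uint32_t firstQuery, uint32_t queryCount) {
        std::vector<uint64_t> results(queryCount);
        VkResult result = vkGetQueryPoolResults(
            device, pool, firstQuery, queryCount,
            results.size() * sizeof(uint64_t), results.data(), sizeof(uint64_t),
            VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
        (void)result;  // A real caller must handle VK_ERROR_DEVICE_LOST etc.
        return results;
    }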
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.h
new file mode 100644
index 00000000000..18cd0012908
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/QuerySetVk.h
@@ -0,0 +1,45 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNNATIVE_VULKAN_QUERYSETVK_H_
+#define DAWNNATIVE_VULKAN_QUERYSETVK_H_
+
+#include "dawn_native/QuerySet.h"
+
+#include "common/vulkan_platform.h"
+
+namespace dawn_native { namespace vulkan {
+
+ class Device;
+
+ class QuerySet final : public QuerySetBase {
+ public:
+ static ResultOrError<QuerySet*> Create(Device* device,
+ const QuerySetDescriptor* descriptor);
+
+ VkQueryPool GetHandle() const;
+
+ private:
+ ~QuerySet() override;
+ using QuerySetBase::QuerySetBase;
+ MaybeError Initialize();
+
+ void DestroyImpl() override;
+
+ VkQueryPool mHandle = VK_NULL_HANDLE;
+ };
+
+}} // namespace dawn_native::vulkan
+
+#endif // DAWNNATIVE_VULKAN_QUERYSETVK_H_
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp
index 19ab88c7e3f..fc11c10bb13 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.cpp
@@ -14,6 +14,11 @@
#include "dawn_native/vulkan/QueueVk.h"
+#include "common/Math.h"
+#include "dawn_native/Buffer.h"
+#include "dawn_native/CommandValidation.h"
+#include "dawn_native/Commands.h"
+#include "dawn_native/DynamicUploader.h"
#include "dawn_native/vulkan/CommandBufferVk.h"
#include "dawn_native/vulkan/CommandRecordingContext.h"
#include "dawn_native/vulkan/DeviceVk.h"
@@ -22,6 +27,61 @@
namespace dawn_native { namespace vulkan {
+ namespace {
+ ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRow(
+ DeviceBase* device,
+ const void* data,
+ uint32_t alignedBytesPerRow,
+ uint32_t optimallyAlignedBytesPerRow,
+ uint32_t alignedRowsPerImage,
+ const TextureDataLayout& dataLayout,
+ const TexelBlockInfo& blockInfo,
+ const Extent3D& writeSizePixel) {
+ uint64_t newDataSizeBytes;
+ DAWN_TRY_ASSIGN(
+ newDataSizeBytes,
+ ComputeRequiredBytesInCopy(blockInfo, writeSizePixel, optimallyAlignedBytesPerRow,
+ alignedRowsPerImage));
+
+ uint64_t optimalOffsetAlignment =
+ ToBackend(device)
+ ->GetDeviceInfo()
+ .properties.limits.optimalBufferCopyOffsetAlignment;
+ ASSERT(IsPowerOfTwo(optimalOffsetAlignment));
+ ASSERT(IsPowerOfTwo(blockInfo.blockByteSize));
+ // We need the offset to be aligned to both optimalOffsetAlignment and blockByteSize.
+ // Since both of them are powers of two, we only need to align to the max value.
+ uint64_t offsetAlignment =
+ std::max(optimalOffsetAlignment, uint64_t(blockInfo.blockByteSize));
+
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+ newDataSizeBytes, device->GetPendingCommandSerial(),
+ offsetAlignment));
+ ASSERT(uploadHandle.mappedBuffer != nullptr);
+
+ uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
+ const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
+ srcPointer += dataLayout.offset;
+
+ uint32_t alignedRowsPerImageInBlock = alignedRowsPerImage / blockInfo.blockHeight;
+ uint32_t dataRowsPerImageInBlock = dataLayout.rowsPerImage / blockInfo.blockHeight;
+ if (dataRowsPerImageInBlock == 0) {
+ dataRowsPerImageInBlock = writeSizePixel.height / blockInfo.blockHeight;
+ }
+
+ ASSERT(dataRowsPerImageInBlock >= alignedRowsPerImageInBlock);
+ uint64_t imageAdditionalStride =
+ dataLayout.bytesPerRow * (dataRowsPerImageInBlock - alignedRowsPerImageInBlock);
+
+ CopyTextureData(dstPointer, srcPointer, writeSizePixel.depth,
+ alignedRowsPerImageInBlock, imageAdditionalStride, alignedBytesPerRow,
+ optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);
+
+ return uploadHandle;
+ }
+ } // namespace
+
// static
Queue* Queue::Create(Device* device) {
return new Queue(device);
@@ -48,4 +108,46 @@ namespace dawn_native { namespace vulkan {
return {};
}
+ MaybeError Queue::WriteTextureImpl(const TextureCopyView& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& writeSizePixel) {
+ const TexelBlockInfo& blockInfo =
+ destination.texture->GetFormat().GetTexelBlockInfo(destination.aspect);
+
+ // We are only copying the part of the data that will appear in the texture.
+ // Note that validating texture copy range ensures that writeSizePixel.width and
+ // writeSizePixel.height are multiples of blockWidth and blockHeight respectively.
+ uint32_t alignedBytesPerRow =
+ (writeSizePixel.width) / blockInfo.blockWidth * blockInfo.blockByteSize;
+ uint32_t alignedRowsPerImage = writeSizePixel.height;
+
+ uint32_t optimalBytesPerRowAlignment =
+ ToBackend(GetDevice())
+ ->GetDeviceInfo()
+ .properties.limits.optimalBufferCopyRowPitchAlignment;
+ uint32_t optimallyAlignedBytesPerRow =
+ Align(alignedBytesPerRow, optimalBytesPerRowAlignment);
+
+ UploadHandle uploadHandle;
+ DAWN_TRY_ASSIGN(uploadHandle,
+ UploadTextureDataAligningBytesPerRow(
+ GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
+ alignedRowsPerImage, dataLayout, blockInfo, writeSizePixel));
+
+ TextureDataLayout passDataLayout = dataLayout;
+ passDataLayout.offset = uploadHandle.startOffset;
+ passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
+ passDataLayout.rowsPerImage = alignedRowsPerImage;
+
+ TextureCopy textureCopy;
+ textureCopy.texture = destination.texture;
+ textureCopy.mipLevel = destination.mipLevel;
+ textureCopy.origin = destination.origin;
+ textureCopy.aspect = ConvertAspect(destination.texture->GetFormat(), destination.aspect);
+
+ return ToBackend(GetDevice())
+ ->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout, &textureCopy,
+ writeSizePixel);
+ }
}} // namespace dawn_native::vulkan
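
The row and offset alignment in WriteTextureImpl is plain power-of-two rounding against the device limits. A worked sketch of the arithmetic, assuming Align behaves like the helper in common/Math:

    #include <cstdint>

    // Round value up to the next multiple of a power-of-two alignment.
    constexpr uint64_t Align(uint64_t value, uint64_t alignment) {
        return (value + alignment - 1) & ~(alignment - 1);
    }

    // E.g. a 100-byte tightly packed row, staged on a device whose
    // optimalBufferCopyRowPitchAlignment is 64, gets a 128-byte pitch.
    static_assert(Align(100, 64) == 128, "row pitch rounds up to the limit");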
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.h
index 715f5eb90d9..34a0e6a5795 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/QueueVk.h
@@ -31,8 +31,12 @@ namespace dawn_native { namespace vulkan {
using QueueBase::QueueBase;
MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+ MaybeError WriteTextureImpl(const TextureCopyView& destination,
+ const void* data,
+ const TextureDataLayout& dataLayout,
+ const Extent3D& writeSizePixel) override;
};
}} // namespace dawn_native::vulkan
-#endif // DAWNNATIVE_VULKAN_QUEUEVK_H_
+#endif // DAWNNATIVE_VULKAN_QUEUEVK_H_
\ No newline at end of file
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
index 5053e01e792..271aaaad6eb 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/RenderPipelineVk.cpp
@@ -410,8 +410,13 @@ namespace dawn_native { namespace vulkan {
multisample.rasterizationSamples = VulkanSampleCount(GetSampleCount());
multisample.sampleShadingEnable = VK_FALSE;
multisample.minSampleShading = 0.0f;
- multisample.pSampleMask = nullptr;
- multisample.alphaToCoverageEnable = VK_FALSE;
+ // VkPipelineMultisampleStateCreateInfo.pSampleMask is an array of length
+ // ceil(rasterizationSamples / 32), and since we're passing a single uint32_t,
+ // we have to assert that this length is indeed 1.
+ ASSERT(multisample.rasterizationSamples <= 32);
+ VkSampleMask sampleMask = GetSampleMask();
+ multisample.pSampleMask = &sampleMask;
+ multisample.alphaToCoverageEnable = descriptor->alphaToCoverageEnabled;
multisample.alphaToOneEnable = VK_FALSE;
VkPipelineDepthStencilStateCreateInfo depthStencilState =
@@ -424,7 +429,7 @@ namespace dawn_native { namespace vulkan {
descriptor->fragmentStage->module->GetFragmentOutputBaseTypes();
for (uint32_t i : IterateBitSet(GetColorAttachmentsMask())) {
const ColorStateDescriptor* colorStateDescriptor = GetColorStateDescriptor(i);
- bool isDeclaredInFragmentShader = fragmentOutputBaseTypes[i] != Format::Other;
+ bool isDeclaredInFragmentShader = fragmentOutputBaseTypes[i] != Format::Type::Other;
colorBlendAttachments[i] =
ComputeColorDesc(colorStateDescriptor, isDeclaredInFragmentShader);
}
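
pSampleMask points at an array of ceil(rasterizationSamples / 32) VkSampleMask words, which is why the change can hand Vulkan a single uint32_t only after asserting the sample count is at most 32. A sketch of the general expansion (hypothetical helper, not Dawn code):

    #include <vulkan/vulkan.h>
    #include <cstdint>
    #include <vector>

    // One 32-bit mask word covers 32 samples; higher counts need more words.
    // Vulkan caps rasterizationSamples at 64, so a uint64_t input suffices.
    std::vector<VkSampleMask> ExpandSampleMask(uint64_t mask, uint32_t sampleCount) {
        uint32_t wordCount = (sampleCount + 31) / 32;
        std::vector<VkSampleMask> words(wordCount);
        for (uint32_t i = 0; i < wordCount; ++i) {
            words[i] = static_cast<VkSampleMask>(mask >> (32 * i));
        }
        return words;
    }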
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp
index 22523a36874..924a47b9fed 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.cpp
@@ -46,17 +46,22 @@ namespace dawn_native { namespace vulkan {
: mDevice(device),
mMemoryTypeIndex(memoryTypeIndex),
mMemoryHeapSize(memoryHeapSize),
+ mPooledMemoryAllocator(this),
mBuddySystem(
// Round down to a power of 2 that's <= mMemoryHeapSize. This will always
// be a multiple of kBuddyHeapsSize because kBuddyHeapsSize is a power of 2.
uint64_t(1) << Log2(mMemoryHeapSize),
// Take the min in the very unlikely case the memory heap is tiny.
std::min(uint64_t(1) << Log2(mMemoryHeapSize), kBuddyHeapsSize),
- this) {
+ &mPooledMemoryAllocator) {
ASSERT(IsPowerOfTwo(kBuddyHeapsSize));
}
~SingleTypeAllocator() override = default;
+ void DestroyPool() {
+ mPooledMemoryAllocator.DestroyPool();
+ }
+
ResultOrError<ResourceMemoryAllocation> AllocateMemory(
const VkMemoryRequirements& requirements) {
return mBuddySystem.Allocate(requirements.size, requirements.alignment);
@@ -100,6 +105,7 @@ namespace dawn_native { namespace vulkan {
Device* mDevice;
size_t mMemoryTypeIndex;
VkDeviceSize mMemoryHeapSize;
+ PooledResourceMemoryAllocator mPooledMemoryAllocator;
BuddyMemoryAllocator mBuddySystem;
};
@@ -258,4 +264,10 @@ namespace dawn_native { namespace vulkan {
return bestType;
}
+ void ResourceMemoryAllocator::DestroyPool() {
+ for (auto& alloc : mAllocatorsPerType) {
+ alloc->DestroyPool();
+ }
+ }
+
}} // namespace dawn_native::vulkan
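
The change interposes a pooling layer between the buddy system and the device: freed heaps go back to a pool for reuse, and DestroyPool() flushes them at shutdown. A structural sketch of that delegation, with deliberately simplified interfaces (Dawn's PooledResourceMemoryAllocator has more responsibilities):

    #include <memory>
    #include <vector>

    struct Heap {};  // Stand-in for a device memory heap.

    class HeapSource {
      public:
        virtual ~HeapSource() = default;
        virtual std::unique_ptr<Heap> AllocateHeap() = 0;
    };

    // Recycles heaps instead of returning them to the device immediately.
    class PooledHeapSource : public HeapSource {
      public:
        explicit PooledHeapSource(HeapSource* backing) : mBacking(backing) {}

        std::unique_ptr<Heap> AllocateHeap() override {
            if (!mPool.empty()) {
                std::unique_ptr<Heap> heap = std::move(mPool.back());
                mPool.pop_back();
                return heap;  // Reuse a previously freed heap.
            }
            return mBacking->AllocateHeap();
        }

        void RecycleHeap(std::unique_ptr<Heap> heap) {
            mPool.push_back(std::move(heap));
        }

        void DestroyPool() {
            mPool.clear();  // Only now is the underlying memory released.
        }

      private:
        HeapSource* mBacking;
        std::vector<std::unique_ptr<Heap>> mPool;
    };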
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.h
index 88f6d4e0c66..04176a33000 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/ResourceMemoryAllocatorVk.h
@@ -18,6 +18,7 @@
#include "common/SerialQueue.h"
#include "common/vulkan_platform.h"
#include "dawn_native/Error.h"
+#include "dawn_native/PooledResourceMemoryAllocator.h"
#include "dawn_native/ResourceMemoryAllocation.h"
#include <memory>
@@ -36,6 +37,8 @@ namespace dawn_native { namespace vulkan {
bool mappable);
void Deallocate(ResourceMemoryAllocation* allocation);
+ void DestroyPool();
+
void Tick(Serial completedSerial);
int FindBestTypeIndex(VkMemoryRequirements requirements, bool mappable);
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
index 660128776d6..30d774d195e 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/SwapChainVk.cpp
@@ -47,8 +47,8 @@ namespace dawn_native { namespace vulkan {
return nullptr;
}
- VkImage nativeTexture =
- VkImage::CreateFromHandle(reinterpret_cast<::VkImage>(next.texture.u64));
+ ::VkImage image = NativeNonDispatachableHandleFromU64<::VkImage>(next.texture.u64);
+ VkImage nativeTexture = VkImage::CreateFromHandle(image);
return Texture::CreateForSwapChain(ToBackend(GetDevice()), descriptor, nativeTexture)
.Detach();
}
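
Non-dispatchable Vulkan handles are opaque pointers on 64-bit builds but uint64_t typedefs on 32-bit builds, so a raw u64 from the swapchain implementation cannot simply be reinterpret_cast in both configurations. A hedged sketch of what a helper like NativeNonDispatachableHandleFromU64 has to do on the pointer-width path (this mirrors, not reproduces, Dawn's helper):

    #include <cstdint>

    // 64-bit build: round-trip the wire's u64 through uintptr_t into the
    // opaque pointer type. (32-bit builds typedef the handle to uint64_t
    // and need no cast at all.)
    template <typename Handle>
    Handle NonDispatchableHandleFromU64(uint64_t u64) {
        static_assert(sizeof(Handle) == sizeof(uintptr_t), "pointer-sized handles only");
        return reinterpret_cast<Handle>(static_cast<uintptr_t>(u64));
    }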
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
index eb502f8c1f3..e61facd09fd 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.cpp
@@ -17,6 +17,7 @@
#include "common/Assert.h"
#include "common/Math.h"
#include "dawn_native/DynamicUploader.h"
+#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/Error.h"
#include "dawn_native/VulkanBackend.h"
#include "dawn_native/vulkan/AdapterVk.h"
@@ -192,23 +193,6 @@ namespace dawn_native { namespace vulkan {
return flags;
}
- // Computes which Vulkan texture aspects are relevant for the given Dawn format
- VkImageAspectFlags VulkanAspectMask(const Format& format) {
- switch (format.aspect) {
- case Format::Aspect::Color:
- return VK_IMAGE_ASPECT_COLOR_BIT;
- case Format::Aspect::Depth:
- return VK_IMAGE_ASPECT_DEPTH_BIT;
- case Format::Aspect::Stencil:
- return VK_IMAGE_ASPECT_STENCIL_BIT;
- case Format::Aspect::DepthStencil:
- return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
- default:
- UNREACHABLE();
- return 0;
- }
- }
-
VkImageMemoryBarrier BuildMemoryBarrier(const Format& format,
const VkImage& image,
wgpu::TextureUsage lastUsage,
@@ -222,7 +206,7 @@ namespace dawn_native { namespace vulkan {
barrier.oldLayout = VulkanImageLayout(lastUsage, format);
barrier.newLayout = VulkanImageLayout(usage, format);
barrier.image = image;
- barrier.subresourceRange.aspectMask = VulkanAspectMask(format);
+ barrier.subresourceRange.aspectMask = VulkanAspectMask(format.aspects);
barrier.subresourceRange.baseMipLevel = range.baseMipLevel;
barrier.subresourceRange.levelCount = range.levelCount;
barrier.subresourceRange.baseArrayLayer = range.baseArrayLayer;
@@ -311,8 +295,10 @@ namespace dawn_native { namespace vulkan {
return VK_FORMAT_B8G8R8A8_SRGB;
case wgpu::TextureFormat::RGB10A2Unorm:
return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
- case wgpu::TextureFormat::RG11B10Float:
+ case wgpu::TextureFormat::RG11B10Ufloat:
return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ return VK_FORMAT_E5B9G9R9_UFLOAT_PACK32;
case wgpu::TextureFormat::RG32Uint:
return VK_FORMAT_R32G32_UINT;
@@ -591,7 +577,7 @@ namespace dawn_native { namespace vulkan {
// Don't clear imported texture if already cleared
if (descriptor->isCleared) {
- SetIsSubresourceContentInitialized(true, {0, 1, 0, 1});
+ SetIsSubresourceContentInitialized(true, GetAllSubresources());
}
// Success, acquire all the external objects.
@@ -663,8 +649,21 @@ namespace dawn_native { namespace vulkan {
return mHandle;
}
- VkImageAspectFlags Texture::GetVkAspectMask() const {
- return VulkanAspectMask(GetFormat());
+ VkImageAspectFlags Texture::GetVkAspectMask(wgpu::TextureAspect aspect) const {
+ // TODO(enga): These masks could be precomputed.
+ switch (aspect) {
+ case wgpu::TextureAspect::All:
+ return VulkanAspectMask(GetFormat().aspects);
+ case wgpu::TextureAspect::DepthOnly:
+ ASSERT(GetFormat().aspects & Aspect::Depth);
+ return VulkanAspectMask(Aspect::Depth);
+ case wgpu::TextureAspect::StencilOnly:
+ ASSERT(GetFormat().aspects & Aspect::Stencil);
+ return VulkanAspectMask(Aspect::Stencil);
+ default:
+ UNREACHABLE();
+ return 0;
+ }
}
void Texture::TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
@@ -681,7 +680,7 @@ namespace dawn_native { namespace vulkan {
if (barriers->size() == transitionBarrierStart) {
barriers->push_back(BuildMemoryBarrier(
GetFormat(), mHandle, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
- SubresourceRange::SingleSubresource(0, 0)));
+ SubresourceRange::SingleMipAndLayer(0, 0, GetFormat().aspects)));
}
// Transfer texture from external queue to graphics queue
@@ -695,7 +694,7 @@ namespace dawn_native { namespace vulkan {
if (barriers->size() == transitionBarrierStart) {
barriers->push_back(BuildMemoryBarrier(
GetFormat(), mHandle, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
- SubresourceRange::SingleSubresource(0, 0)));
+ SubresourceRange::SingleMipAndLayer(0, 0, GetFormat().aspects)));
}
// Transfer texture from graphics queue to external queue
@@ -762,24 +761,40 @@ namespace dawn_native { namespace vulkan {
} else {
for (uint32_t arrayLayer = 0; arrayLayer < GetArrayLayers(); ++arrayLayer) {
for (uint32_t mipLevel = 0; mipLevel < GetNumMipLevels(); ++mipLevel) {
- uint32_t index = GetSubresourceIndex(mipLevel, arrayLayer);
+ wgpu::TextureUsage lastUsage = wgpu::TextureUsage::None;
+ wgpu::TextureUsage usage = wgpu::TextureUsage::None;
+
+ // Accumulate usage for all format aspects because we cannot transition
+ // separately.
+ // TODO(enga): Use VK_KHR_separate_depth_stencil_layouts.
+ for (Aspect aspect : IterateEnumMask(GetFormat().aspects)) {
+ uint32_t index = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
+
+ usage |= textureUsages.subresourceUsages[index];
+ lastUsage |= mSubresourceLastUsages[index];
+ }
// Avoid encoding barriers when they aren't needed.
- if (textureUsages.subresourceUsages[index] == wgpu::TextureUsage::None) {
+ if (usage == wgpu::TextureUsage::None) {
continue;
}
- if (CanReuseWithoutBarrier(mSubresourceLastUsages[index],
- textureUsages.subresourceUsages[index])) {
+ if (CanReuseWithoutBarrier(lastUsage, usage)) {
continue;
}
- imageBarriers->push_back(BuildMemoryBarrier(
- format, mHandle, mSubresourceLastUsages[index],
- textureUsages.subresourceUsages[index],
- SubresourceRange::SingleSubresource(mipLevel, arrayLayer)));
- allLastUsages |= mSubresourceLastUsages[index];
- allUsages |= textureUsages.subresourceUsages[index];
- mSubresourceLastUsages[index] = textureUsages.subresourceUsages[index];
+
+ allLastUsages |= lastUsage;
+ allUsages |= usage;
+
+ for (Aspect aspect : IterateEnumMask(GetFormat().aspects)) {
+ uint32_t index = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
+ mSubresourceLastUsages[index] = usage;
+ }
+
+ imageBarriers->push_back(
+ BuildMemoryBarrier(format, mHandle, lastUsage, usage,
+ SubresourceRange::SingleMipAndLayer(
+ mipLevel, arrayLayer, GetFormat().aspects)));
}
}
}
@@ -801,7 +816,6 @@ namespace dawn_native { namespace vulkan {
const Format& format = GetFormat();
wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
- uint32_t subresourceCount = GetSubresourceCount();
// These transitions assume it is a 2D texture
ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
@@ -810,7 +824,9 @@ namespace dawn_native { namespace vulkan {
// are the same, then we can use one barrier to do state transition for all subresources.
// Note that if the texture has only one mip level and one array slice, it will fall into
// this category.
- bool areAllSubresourcesCovered = range.levelCount * range.layerCount == subresourceCount;
+ bool areAllSubresourcesCovered = (range.levelCount == GetNumMipLevels() && //
+ range.layerCount == GetArrayLayers() && //
+ range.aspects == format.aspects);
if (mSameLastUsagesAcrossSubresources && areAllSubresourcesCovered) {
ASSERT(range.baseMipLevel == 0 && range.baseArrayLayer == 0);
if (CanReuseWithoutBarrier(mSubresourceLastUsages[0], usage)) {
@@ -819,7 +835,7 @@ namespace dawn_native { namespace vulkan {
barriers.push_back(
BuildMemoryBarrier(format, mHandle, mSubresourceLastUsages[0], usage, range));
allLastUsages = mSubresourceLastUsages[0];
- for (uint32_t i = 0; i < subresourceCount; ++i) {
+ for (uint32_t i = 0; i < GetSubresourceCount(); ++i) {
mSubresourceLastUsages[i] = usage;
}
} else {
@@ -827,17 +843,29 @@ namespace dawn_native { namespace vulkan {
layer < range.baseArrayLayer + range.layerCount; ++layer) {
for (uint32_t level = range.baseMipLevel;
level < range.baseMipLevel + range.levelCount; ++level) {
- uint32_t index = GetSubresourceIndex(level, layer);
+ // Accumulate usage for all format aspects because we cannot transition
+ // separately.
+ // TODO(enga): Use VK_KHR_separate_depth_stencil_layouts.
+ wgpu::TextureUsage lastUsage = wgpu::TextureUsage::None;
+ for (Aspect aspect : IterateEnumMask(format.aspects)) {
+ uint32_t index = GetSubresourceIndex(level, layer, aspect);
+ lastUsage |= mSubresourceLastUsages[index];
+ }
- if (CanReuseWithoutBarrier(mSubresourceLastUsages[index], usage)) {
+ if (CanReuseWithoutBarrier(lastUsage, usage)) {
continue;
}
- barriers.push_back(
- BuildMemoryBarrier(format, mHandle, mSubresourceLastUsages[index], usage,
- SubresourceRange::SingleSubresource(level, layer)));
- allLastUsages |= mSubresourceLastUsages[index];
- mSubresourceLastUsages[index] = usage;
+ allLastUsages |= lastUsage;
+
+ for (Aspect aspect : IterateEnumMask(format.aspects)) {
+ uint32_t index = GetSubresourceIndex(level, layer, aspect);
+ mSubresourceLastUsages[index] = usage;
+ }
+
+ barriers.push_back(BuildMemoryBarrier(
+ format, mHandle, lastUsage, usage,
+ SubresourceRange::SingleMipAndLayer(level, layer, format.aspects)));
}
}
}
@@ -866,7 +894,6 @@ namespace dawn_native { namespace vulkan {
TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
if (GetFormat().isRenderable) {
VkImageSubresourceRange imageRange = {};
- imageRange.aspectMask = GetVkAspectMask();
imageRange.levelCount = 1;
imageRange.layerCount = 1;
@@ -875,16 +902,25 @@ namespace dawn_native { namespace vulkan {
imageRange.baseMipLevel = level;
for (uint32_t layer = range.baseArrayLayer;
layer < range.baseArrayLayer + range.layerCount; ++layer) {
- if (clearValue == TextureBase::ClearValue::Zero &&
- IsSubresourceContentInitialized(
- SubresourceRange::SingleSubresource(level, layer))) {
- // Skip lazy clears if already initialized.
+ Aspect aspects = Aspect::None;
+ for (Aspect aspect : IterateEnumMask(range.aspects)) {
+ if (clearValue == TextureBase::ClearValue::Zero &&
+ IsSubresourceContentInitialized(
+ SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
+ // Skip lazy clears if already initialized.
+ continue;
+ }
+ aspects |= aspect;
+ }
+
+ if (aspects == Aspect::None) {
continue;
}
+ imageRange.aspectMask = VulkanAspectMask(aspects);
imageRange.baseArrayLayer = layer;
- if (GetFormat().HasDepthOrStencil()) {
+ if (aspects & (Aspect::Depth | Aspect::Stencil)) {
VkClearDepthStencilValue clearDepthStencilValue[1];
clearDepthStencilValue[0].depth = fClearColor;
clearDepthStencilValue[0].stencil = clearColor;
@@ -893,6 +929,7 @@ namespace dawn_native { namespace vulkan {
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clearDepthStencilValue, 1,
&imageRange);
} else {
+ ASSERT(aspects == Aspect::Color);
VkClearColorValue clearColorValue = {
{fClearColor, fClearColor, fClearColor, fClearColor}};
device->fn.CmdClearColorImage(recordingContext->commandBuffer, GetHandle(),
@@ -902,7 +939,6 @@ namespace dawn_native { namespace vulkan {
}
}
} else {
- // TODO(natlee@microsoft.com): test compressed textures are cleared
// create temp buffer with clear color to copy to the texture image
uint32_t bytesPerRow =
Align((GetWidth() / GetFormat().blockWidth) * GetFormat().blockByteSize,
@@ -915,7 +951,8 @@ namespace dawn_native { namespace vulkan {
DynamicUploader* uploader = device->GetDynamicUploader();
UploadHandle uploadHandle;
DAWN_TRY_ASSIGN(uploadHandle,
- uploader->Allocate(bufferSize, device->GetPendingCommandSerial()));
+ uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
+ GetFormat().blockByteSize));
memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
// compute the buffer image copy to set the clear region of the entire texture
@@ -924,6 +961,7 @@ namespace dawn_native { namespace vulkan {
bufferCopy.offset = uploadHandle.startOffset;
bufferCopy.bytesPerRow = bytesPerRow;
+ ASSERT(range.aspects == Aspect::Color);
for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
++level) {
Extent3D copySize = GetMipLevelVirtualSize(level);
@@ -932,16 +970,17 @@ namespace dawn_native { namespace vulkan {
layer < range.baseArrayLayer + range.layerCount; ++layer) {
if (clearValue == TextureBase::ClearValue::Zero &&
IsSubresourceContentInitialized(
- SubresourceRange::SingleSubresource(level, layer))) {
+ SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
// Skip lazy clears if already initialized.
continue;
}
+ ASSERT(GetFormat().aspects == Aspect::Color);
dawn_native::TextureCopy textureCopy;
textureCopy.texture = this;
- textureCopy.origin = {0, 0, 0};
+ textureCopy.origin = {0, 0, layer};
textureCopy.mipLevel = level;
- textureCopy.arrayLayer = layer;
+ textureCopy.aspect = GetFormat().aspects;
VkBufferImageCopy region =
ComputeBufferImageCopyRegion(bufferCopy, textureCopy, copySize);
@@ -1008,11 +1047,13 @@ namespace dawn_native { namespace vulkan {
createInfo.format = VulkanImageFormat(device, descriptor->format);
createInfo.components = VkComponentMapping{VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
- createInfo.subresourceRange.aspectMask = VulkanAspectMask(GetFormat());
- createInfo.subresourceRange.baseMipLevel = descriptor->baseMipLevel;
- createInfo.subresourceRange.levelCount = descriptor->mipLevelCount;
- createInfo.subresourceRange.baseArrayLayer = descriptor->baseArrayLayer;
- createInfo.subresourceRange.layerCount = descriptor->arrayLayerCount;
+
+ const SubresourceRange& subresources = GetSubresourceRange();
+ createInfo.subresourceRange.baseMipLevel = subresources.baseMipLevel;
+ createInfo.subresourceRange.levelCount = subresources.levelCount;
+ createInfo.subresourceRange.baseArrayLayer = subresources.baseArrayLayer;
+ createInfo.subresourceRange.layerCount = subresources.layerCount;
+ createInfo.subresourceRange.aspectMask = VulkanAspectMask(subresources.aspects);
return CheckVkSuccess(
device->fn.CreateImageView(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
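
Because depth and stencil cannot be transitioned separately without VK_KHR_separate_depth_stencil_layouts, the barriers above OR the per-aspect usages together and emit one barrier whose subresource range covers both aspects. A minimal sketch of such a combined barrier, with layouts chosen purely for illustration:

    #include <vulkan/vulkan.h>

    // One barrier transitioning both aspects of a depth-stencil image together.
    VkImageMemoryBarrier DepthStencilBarrier(VkImage image) {
        VkImageMemoryBarrier barrier = {};
        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
        barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
        barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
        barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.image = image;
        // The range names both aspects so one transition covers the pair.
        barrier.subresourceRange.aspectMask =
            VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        barrier.subresourceRange.baseMipLevel = 0;
        barrier.subresourceRange.levelCount = 1;
        barrier.subresourceRange.baseArrayLayer = 0;
        barrier.subresourceRange.layerCount = 1;
        return barrier;
    }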
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
index 8a1564af72b..6748ebd0d4f 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/TextureVk.h
@@ -59,7 +59,7 @@ namespace dawn_native { namespace vulkan {
VkImage nativeImage);
VkImage GetHandle() const;
- VkImageAspectFlags GetVkAspectMask() const;
+ VkImageAspectFlags GetVkAspectMask(wgpu::TextureAspect aspect) const;
// Transitions the texture to be used as `usage`, recording any necessary barrier in
// `commands`.
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
index 4ce513f5840..1b6b2d66d31 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.cpp
@@ -15,6 +15,7 @@
#include "dawn_native/vulkan/UtilsVulkan.h"
#include "common/Assert.h"
+#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/Format.h"
#include "dawn_native/vulkan/Forward.h"
#include "dawn_native/vulkan/TextureVk.h"
@@ -44,6 +45,28 @@ namespace dawn_native { namespace vulkan {
}
}
+ // Convert Dawn texture aspects to Vulkan texture aspect flags
+ VkImageAspectFlags VulkanAspectMask(const Aspect& aspects) {
+ VkImageAspectFlags flags = 0;
+ for (Aspect aspect : IterateEnumMask(aspects)) {
+ switch (aspect) {
+ case Aspect::Color:
+ flags |= VK_IMAGE_ASPECT_COLOR_BIT;
+ break;
+ case Aspect::Depth:
+ flags |= VK_IMAGE_ASPECT_DEPTH_BIT;
+ break;
+ case Aspect::Stencil:
+ flags |= VK_IMAGE_ASPECT_STENCIL_BIT;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ return flags;
+ }
+
// The Vulkan spec requires that the source/destination region specified by each element of
// pRegions be contained within srcImage/dstImage. Here the size of
// the image refers to the virtual size, while Dawn validates texture copy extent with the
@@ -68,32 +91,52 @@ namespace dawn_native { namespace vulkan {
VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
const TextureCopy& textureCopy,
const Extent3D& copySize) {
+ TextureDataLayout passDataLayout;
+ passDataLayout.offset = bufferCopy.offset;
+ passDataLayout.rowsPerImage = bufferCopy.rowsPerImage;
+ passDataLayout.bytesPerRow = bufferCopy.bytesPerRow;
+ return ComputeBufferImageCopyRegion(passDataLayout, textureCopy, copySize);
+ }
+
+ VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize) {
const Texture* texture = ToBackend(textureCopy.texture.Get());
VkBufferImageCopy region;
- region.bufferOffset = bufferCopy.offset;
+ region.bufferOffset = dataLayout.offset;
// In Vulkan the row length is in texels while it is in bytes for Dawn
- const Format& format = texture->GetFormat();
- ASSERT(bufferCopy.bytesPerRow % format.blockByteSize == 0);
- region.bufferRowLength = bufferCopy.bytesPerRow / format.blockByteSize * format.blockWidth;
- region.bufferImageHeight = bufferCopy.rowsPerImage;
+ const TexelBlockInfo& blockInfo =
+ texture->GetFormat().GetTexelBlockInfo(textureCopy.aspect);
+ ASSERT(dataLayout.bytesPerRow % blockInfo.blockByteSize == 0);
+ region.bufferRowLength =
+ dataLayout.bytesPerRow / blockInfo.blockByteSize * blockInfo.blockWidth;
+ region.bufferImageHeight = dataLayout.rowsPerImage;
- region.imageSubresource.aspectMask = texture->GetVkAspectMask();
+ region.imageSubresource.aspectMask = VulkanAspectMask(textureCopy.aspect);
region.imageSubresource.mipLevel = textureCopy.mipLevel;
- region.imageSubresource.baseArrayLayer = textureCopy.arrayLayer;
- region.imageOffset.x = textureCopy.origin.x;
- region.imageOffset.y = textureCopy.origin.y;
- region.imageOffset.z = textureCopy.origin.z;
+ switch (textureCopy.texture->GetDimension()) {
+ case wgpu::TextureDimension::e2D: {
+ region.imageOffset.x = textureCopy.origin.x;
+ region.imageOffset.y = textureCopy.origin.y;
+ region.imageOffset.z = 0;
+
+ region.imageSubresource.baseArrayLayer = textureCopy.origin.z;
+ region.imageSubresource.layerCount = copySize.depth;
- Extent3D imageExtent = ComputeTextureCopyExtent(textureCopy, copySize);
- region.imageExtent.width = imageExtent.width;
- region.imageExtent.height = imageExtent.height;
+ Extent3D imageExtent = ComputeTextureCopyExtent(textureCopy, copySize);
+ region.imageExtent.width = imageExtent.width;
+ region.imageExtent.height = imageExtent.height;
+ region.imageExtent.depth = 1;
+ break;
+ }
- ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
- region.imageSubresource.layerCount = copySize.depth;
- region.imageExtent.depth = 1;
+ default:
+ UNREACHABLE();
+ break;
+ }
return region;
}
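
Vulkan's bufferRowLength is measured in texels while Dawn's bytesPerRow is in bytes, so the copy region converts through the texel block size, as the assert on blockByteSize divisibility guards. A worked sketch of the conversion, including a block-compressed format:

    #include <cstdint>

    // bytesPerRow (bytes) -> bufferRowLength (texels), via the texel block.
    constexpr uint32_t BufferRowLengthInTexels(uint32_t bytesPerRow,
                                               uint32_t blockByteSize,
                                               uint32_t blockWidth) {
        return bytesPerRow / blockByteSize * blockWidth;
    }

    // RGBA8 (1x1 block, 4 bytes): a 256-byte row is 64 texels wide.
    static_assert(BufferRowLengthInTexels(256, 4, 1) == 64, "uncompressed");
    // BC1 (4x4 block, 8 bytes): a 256-byte row is 32 blocks = 128 texels wide.
    static_assert(BufferRowLengthInTexels(256, 8, 4) == 128, "block-compressed");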
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h
index 36ebd34fe67..e57e3f4c768 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/UtilsVulkan.h
@@ -88,10 +88,16 @@ namespace dawn_native { namespace vulkan {
VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op);
+ VkImageAspectFlags VulkanAspectMask(const Aspect& aspects);
+
Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize);
+
VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
const TextureCopy& textureCopy,
const Extent3D& copySize);
+ VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
+ const TextureCopy& textureCopy,
+ const Extent3D& copySize);
}} // namespace dawn_native::vulkan
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
index 99057771f63..d43a10f7031 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
@@ -111,7 +111,7 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
VkImportMemoryFdInfoKHR importMemoryFdInfo;
importMemoryFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
importMemoryFdInfo.pNext = nullptr;
- importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+ importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
importMemoryFdInfo.fd = handle;
VkMemoryAllocateInfo allocateInfo;
@@ -129,7 +129,13 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
const VkImageCreateInfo& baseCreateInfo) {
+ VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
+ externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
+ externalMemoryImageCreateInfo.pNext = nullptr;
+ externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+
VkImageCreateInfo createInfo = baseCreateInfo;
+ createInfo.pNext = &externalMemoryImageCreateInfo;
createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;
createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
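
The fix here (and in the Zircon variant below) chains a VkExternalMemoryImageCreateInfo onto the image create info, so the driver knows at creation time which external handle types the image's memory may come from; importing into an image created without the matching handleTypes is invalid usage per the spec. The same pNext pattern in isolation, using the opaque-FD handle type from this diff:

    #include <vulkan/vulkan.h>

    // Declare at image-creation time that the backing memory may be imported
    // from an opaque POSIX file descriptor. The externalInfo struct must
    // outlive the vkCreateImage call that consumes createInfo.
    void ChainExternalMemoryInfo(VkImageCreateInfo* createInfo,
                                 VkExternalMemoryImageCreateInfo* externalInfo) {
        externalInfo->sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
        externalInfo->pNext = nullptr;
        externalInfo->handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
        createInfo->pNext = externalInfo;
    }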
diff --git a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
index 08d8d630045..85c4e4a8d3e 100644
--- a/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
+++ b/chromium/third_party/dawn/src/dawn_native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
@@ -131,7 +131,14 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
const VkImageCreateInfo& baseCreateInfo) {
+ VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
+ externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
+ externalMemoryImageCreateInfo.pNext = nullptr;
+ externalMemoryImageCreateInfo.handleTypes =
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA;
+
VkImageCreateInfo createInfo = baseCreateInfo;
+ createInfo.pNext = &externalMemoryImageCreateInfo;
createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;
createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
diff --git a/chromium/third_party/dawn/src/dawn_platform/BUILD.gn b/chromium/third_party/dawn/src/dawn_platform/BUILD.gn
index a152b9ecdea..c2b10933a32 100644
--- a/chromium/third_party/dawn/src/dawn_platform/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn_platform/BUILD.gn
@@ -24,7 +24,5 @@ source_set("dawn_platform") {
"tracing/TraceEvent.h",
]
- deps = [
- "${dawn_root}/src/common",
- ]
+ deps = [ "${dawn_root}/src/common" ]
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/BUILD.gn b/chromium/third_party/dawn/src/dawn_wire/BUILD.gn
index 5d1b2d7f2df..9e463423242 100644
--- a/chromium/third_party/dawn/src/dawn_wire/BUILD.gn
+++ b/chromium/third_party/dawn/src/dawn_wire/BUILD.gn
@@ -20,9 +20,7 @@ import("${dawn_root}/scripts/dawn_component.gni")
# Public dawn_wire headers so they can be publicly visible for
# dependencies of dawn_wire
source_set("dawn_wire_headers") {
- public_deps = [
- "${dawn_root}/src/dawn:dawn_headers",
- ]
+ public_deps = [ "${dawn_root}/src/dawn:dawn_headers" ]
all_dependent_configs = [ "${dawn_root}/src/common:dawn_public_include_dirs" ]
sources = [
"${dawn_root}/src/include/dawn_wire/Wire.h",
@@ -39,7 +37,6 @@ dawn_json_generator("dawn_wire_gen") {
"src/dawn_wire/WireCmd_autogen.cpp",
"src/dawn_wire/client/ApiObjects_autogen.h",
"src/dawn_wire/client/ApiProcs_autogen.cpp",
- "src/dawn_wire/client/ApiProcs_autogen.h",
"src/dawn_wire/client/ClientBase_autogen.h",
"src/dawn_wire/client/ClientHandlers_autogen.cpp",
"src/dawn_wire/client/ClientPrototypes_autogen.inc",
@@ -66,7 +63,6 @@ dawn_component("dawn_wire") {
"WireDeserializeAllocator.h",
"WireServer.cpp",
"client/ApiObjects.h",
- "client/ApiProcs.cpp",
"client/Buffer.cpp",
"client/Buffer.h",
"client/Client.cpp",
@@ -78,6 +74,8 @@ dawn_component("dawn_wire") {
"client/Fence.cpp",
"client/Fence.h",
"client/ObjectAllocator.h",
+ "client/Queue.cpp",
+ "client/Queue.h",
"server/ObjectStorage.h",
"server/Server.cpp",
"server/Server.h",
@@ -89,7 +87,5 @@ dawn_component("dawn_wire") {
]
# Make headers publicly visible
- public_deps = [
- ":dawn_wire_headers",
- ]
+ public_deps = [ ":dawn_wire_headers" ]
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt b/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt
index 8ec7bff9ab9..e7927fa6b56 100644
--- a/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/dawn_wire/CMakeLists.txt
@@ -30,7 +30,6 @@ target_sources(dawn_wire PRIVATE
"WireDeserializeAllocator.h"
"WireServer.cpp"
"client/ApiObjects.h"
- "client/ApiProcs.cpp"
"client/Buffer.cpp"
"client/Buffer.h"
"client/Client.cpp"
@@ -42,6 +41,8 @@ target_sources(dawn_wire PRIVATE
"client/Fence.cpp"
"client/Fence.h"
"client/ObjectAllocator.h"
+ "client/Queue.cpp"
+ "client/Queue.h"
"server/ObjectStorage.h"
"server/Server.cpp"
"server/Server.h"
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ApiObjects.h b/chromium/third_party/dawn/src/dawn_wire/client/ApiObjects.h
index b74eefe6153..f842d53f469 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ApiObjects.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ApiObjects.h
@@ -20,6 +20,7 @@
#include "dawn_wire/client/Buffer.h"
#include "dawn_wire/client/Device.h"
#include "dawn_wire/client/Fence.h"
+#include "dawn_wire/client/Queue.h"
#include "dawn_wire/client/ApiObjects_autogen.h"
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp
deleted file mode 100644
index 358e7175252..00000000000
--- a/chromium/third_party/dawn/src/dawn_wire/client/ApiProcs.cpp
+++ /dev/null
@@ -1,433 +0,0 @@
-// Copyright 2019 The Dawn Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "dawn_wire/client/ApiObjects.h"
-#include "dawn_wire/client/ApiProcs_autogen.h"
-#include "dawn_wire/client/Client.h"
-
-namespace dawn_wire { namespace client {
-
- namespace {
- template <typename Handle>
- void SerializeBufferMapAsync(const Buffer* buffer, uint32_t serial, Handle* handle) {
- // TODO(enga): Remove the template when Read/Write handles are combined in a tagged
- // pointer.
- constexpr bool isWrite =
- std::is_same<Handle, MemoryTransferService::WriteHandle>::value;
-
- // Get the serialization size of the handle.
- size_t handleCreateInfoLength = handle->SerializeCreateSize();
-
- BufferMapAsyncCmd cmd;
- cmd.bufferId = buffer->id;
- cmd.requestSerial = serial;
- cmd.isWrite = isWrite;
- cmd.handleCreateInfoLength = handleCreateInfoLength;
- cmd.handleCreateInfo = nullptr;
-
- char* writeHandleSpace =
- buffer->device->GetClient()->SerializeCommand(cmd, handleCreateInfoLength);
-
- // Serialize the handle into the space after the command.
- handle->SerializeCreate(writeHandleSpace);
- }
- } // namespace
-
- void ClientHandwrittenBufferMapReadAsync(WGPUBuffer cBuffer,
- WGPUBufferMapReadCallback callback,
- void* userdata) {
- Buffer* buffer = reinterpret_cast<Buffer*>(cBuffer);
-
- uint32_t serial = buffer->requestSerial++;
- ASSERT(buffer->requests.find(serial) == buffer->requests.end());
-
- if (buffer->size > std::numeric_limits<size_t>::max()) {
- // On buffer creation, we check that mappable buffers do not exceed this size.
- // So this buffer must not have mappable usage. Inject a validation error.
- ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(buffer->device),
- WGPUErrorType_Validation,
- "Buffer needs the correct map usage bit");
- callback(WGPUBufferMapAsyncStatus_Error, nullptr, 0, userdata);
- return;
- }
-
- // Create a ReadHandle for the map request. This is the client's intent to read GPU
- // memory.
- MemoryTransferService::ReadHandle* readHandle =
- buffer->device->GetClient()->GetMemoryTransferService()->CreateReadHandle(
- static_cast<size_t>(buffer->size));
- if (readHandle == nullptr) {
- ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(buffer->device),
- WGPUErrorType_OutOfMemory, "Failed to create buffer mapping");
- callback(WGPUBufferMapAsyncStatus_Error, nullptr, 0, userdata);
- return;
- }
-
- Buffer::MapRequestData request = {};
- request.readCallback = callback;
- request.userdata = userdata;
- // The handle is owned by the MapRequest until the callback returns.
- request.readHandle = std::unique_ptr<MemoryTransferService::ReadHandle>(readHandle);
-
- // Store a mapping from serial -> MapRequest. The client can map/unmap before the map
- // operations are returned by the server so multiple requests may be in flight.
- buffer->requests[serial] = std::move(request);
-
- SerializeBufferMapAsync(buffer, serial, readHandle);
- }
-
- void ClientHandwrittenBufferMapWriteAsync(WGPUBuffer cBuffer,
- WGPUBufferMapWriteCallback callback,
- void* userdata) {
- Buffer* buffer = reinterpret_cast<Buffer*>(cBuffer);
-
- uint32_t serial = buffer->requestSerial++;
- ASSERT(buffer->requests.find(serial) == buffer->requests.end());
-
- if (buffer->size > std::numeric_limits<size_t>::max()) {
- // On buffer creation, we check that mappable buffers do not exceed this size.
- // So this buffer must not have mappable usage. Inject a validation error.
- ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(buffer->device),
- WGPUErrorType_Validation,
- "Buffer needs the correct map usage bit");
- callback(WGPUBufferMapAsyncStatus_Error, nullptr, 0, userdata);
- return;
- }
-
- // Create a WriteHandle for the map request. This is the client's intent to write GPU
- // memory.
- MemoryTransferService::WriteHandle* writeHandle =
- buffer->device->GetClient()->GetMemoryTransferService()->CreateWriteHandle(
- static_cast<size_t>(buffer->size));
- if (writeHandle == nullptr) {
- ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(buffer->device),
- WGPUErrorType_OutOfMemory, "Failed to create buffer mapping");
- callback(WGPUBufferMapAsyncStatus_Error, nullptr, 0, userdata);
- return;
- }
-
- Buffer::MapRequestData request = {};
- request.writeCallback = callback;
- request.userdata = userdata;
- // The handle is owned by the MapRequest until the callback returns.
- request.writeHandle = std::unique_ptr<MemoryTransferService::WriteHandle>(writeHandle);
-
- // Store a mapping from serial -> MapRequest. The client can map/unmap before the map
- // operations are returned by the server so multiple requests may be in flight.
- buffer->requests[serial] = std::move(request);
-
- SerializeBufferMapAsync(buffer, serial, writeHandle);
- }
-
- WGPUBuffer ClientHandwrittenDeviceCreateBuffer(WGPUDevice cDevice,
- const WGPUBufferDescriptor* descriptor) {
- Device* device = reinterpret_cast<Device*>(cDevice);
- Client* wireClient = device->GetClient();
-
- if ((descriptor->usage & (WGPUBufferUsage_MapRead | WGPUBufferUsage_MapWrite)) != 0 &&
- descriptor->size > std::numeric_limits<size_t>::max()) {
- ClientDeviceInjectError(cDevice, WGPUErrorType_OutOfMemory,
- "Buffer is too large for map usage");
- return ClientDeviceCreateErrorBuffer(cDevice);
- }
-
- auto* bufferObjectAndSerial = wireClient->BufferAllocator().New(device);
- Buffer* buffer = bufferObjectAndSerial->object.get();
- // Store the size of the buffer so that mapping operations can allocate a
- // MemoryTransfer handle of the proper size.
- buffer->size = descriptor->size;
-
- DeviceCreateBufferCmd cmd;
- cmd.self = cDevice;
- cmd.descriptor = descriptor;
- cmd.result = ObjectHandle{buffer->id, bufferObjectAndSerial->generation};
-
- wireClient->SerializeCommand(cmd);
-
- return reinterpret_cast<WGPUBuffer>(buffer);
- }
-
- WGPUCreateBufferMappedResult ClientHandwrittenDeviceCreateBufferMapped(
- WGPUDevice cDevice,
- const WGPUBufferDescriptor* descriptor) {
- Device* device = reinterpret_cast<Device*>(cDevice);
- Client* wireClient = device->GetClient();
-
- WGPUCreateBufferMappedResult result;
- result.data = nullptr;
- result.dataLength = 0;
-
- // This buffer is too large to be mapped and to make a WriteHandle for.
- if (descriptor->size > std::numeric_limits<size_t>::max()) {
- ClientDeviceInjectError(cDevice, WGPUErrorType_OutOfMemory,
- "Buffer is too large for mapping");
- result.buffer = ClientDeviceCreateErrorBuffer(cDevice);
- return result;
- }
-
- // Create a WriteHandle for the map request. This is the client's intent to write GPU
- // memory.
- std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle =
- std::unique_ptr<MemoryTransferService::WriteHandle>(
- wireClient->GetMemoryTransferService()->CreateWriteHandle(descriptor->size));
-
- if (writeHandle == nullptr) {
- ClientDeviceInjectError(cDevice, WGPUErrorType_OutOfMemory,
- "Buffer mapping allocation failed");
- result.buffer = ClientDeviceCreateErrorBuffer(cDevice);
- return result;
- }
-
- // CreateBufferMapped is synchronous and the staging buffer for upload should be immediately
- // available.
- // Open the WriteHandle. This returns a pointer and size of mapped memory.
- // |result.data| may be null on error.
- std::tie(result.data, result.dataLength) = writeHandle->Open();
- if (result.data == nullptr) {
- ClientDeviceInjectError(cDevice, WGPUErrorType_OutOfMemory,
- "Buffer mapping allocation failed");
- result.buffer = ClientDeviceCreateErrorBuffer(cDevice);
- return result;
- }
-
- auto* bufferObjectAndSerial = wireClient->BufferAllocator().New(device);
- Buffer* buffer = bufferObjectAndSerial->object.get();
- buffer->size = descriptor->size;
- // Successfully created staging memory. The buffer now owns the WriteHandle.
- buffer->writeHandle = std::move(writeHandle);
-
- result.buffer = reinterpret_cast<WGPUBuffer>(buffer);
-
- // Get the serialization size of the WriteHandle.
- size_t handleCreateInfoLength = buffer->writeHandle->SerializeCreateSize();
-
- DeviceCreateBufferMappedCmd cmd;
- cmd.device = cDevice;
- cmd.descriptor = descriptor;
- cmd.result = ObjectHandle{buffer->id, bufferObjectAndSerial->generation};
- cmd.handleCreateInfoLength = handleCreateInfoLength;
- cmd.handleCreateInfo = nullptr;
-
- char* writeHandleSpace =
- buffer->device->GetClient()->SerializeCommand(cmd, handleCreateInfoLength);
-
- // Serialize the WriteHandle into the space after the command.
- buffer->writeHandle->SerializeCreate(writeHandleSpace);
-
- return result;
- }
-
- void ClientHandwrittenDevicePushErrorScope(WGPUDevice cDevice, WGPUErrorFilter filter) {
- Device* device = reinterpret_cast<Device*>(cDevice);
- device->PushErrorScope(filter);
- }
-
- bool ClientHandwrittenDevicePopErrorScope(WGPUDevice cDevice,
- WGPUErrorCallback callback,
- void* userdata) {
- Device* device = reinterpret_cast<Device*>(cDevice);
- return device->RequestPopErrorScope(callback, userdata);
- }
-
- uint64_t ClientHandwrittenFenceGetCompletedValue(WGPUFence cSelf) {
- auto fence = reinterpret_cast<Fence*>(cSelf);
- return fence->completedValue;
- }
-
- void ClientHandwrittenFenceOnCompletion(WGPUFence cFence,
- uint64_t value,
- WGPUFenceOnCompletionCallback callback,
- void* userdata) {
- Fence* fence = reinterpret_cast<Fence*>(cFence);
- if (value > fence->signaledValue) {
- ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(fence->device),
- WGPUErrorType_Validation,
- "Value greater than fence signaled value");
- callback(WGPUFenceCompletionStatus_Error, userdata);
- return;
- }
-
- if (value <= fence->completedValue) {
- callback(WGPUFenceCompletionStatus_Success, userdata);
- return;
- }
-
- Fence::OnCompletionData request;
- request.completionCallback = callback;
- request.userdata = userdata;
- fence->requests.Enqueue(std::move(request), value);
- }
-
- void ClientHandwrittenBufferSetSubData(WGPUBuffer cBuffer,
- uint64_t start,
- uint64_t count,
- const void* data) {
- Buffer* buffer = reinterpret_cast<Buffer*>(cBuffer);
-
- BufferSetSubDataInternalCmd cmd;
- cmd.bufferId = buffer->id;
- cmd.start = start;
- cmd.count = count;
- cmd.data = static_cast<const uint8_t*>(data);
-
- buffer->device->GetClient()->SerializeCommand(cmd);
- }
-
- void ClientHandwrittenBufferUnmap(WGPUBuffer cBuffer) {
- Buffer* buffer = reinterpret_cast<Buffer*>(cBuffer);
-
- // Invalidate the local pointer, and cancel all other in-flight requests that would
- // turn into errors anyway (you can't double map). This prevents race when the following
- // happens, where the application code would have unmapped a buffer but still receive a
- // callback:
- // - Client -> Server: MapRequest1, Unmap, MapRequest2
- // - Server -> Client: Result of MapRequest1
- // - Unmap locally on the client
- // - Server -> Client: Result of MapRequest2
- if (buffer->writeHandle) {
- // Writes need to be flushed before Unmap is sent. Unmap calls all associated
- // in-flight callbacks which may read the updated data.
- ASSERT(buffer->readHandle == nullptr);
-
- // Get the serialization size of metadata to flush writes.
- size_t writeFlushInfoLength = buffer->writeHandle->SerializeFlushSize();
-
- BufferUpdateMappedDataCmd cmd;
- cmd.bufferId = buffer->id;
- cmd.writeFlushInfoLength = writeFlushInfoLength;
- cmd.writeFlushInfo = nullptr;
-
- char* writeHandleSpace =
- buffer->device->GetClient()->SerializeCommand(cmd, writeFlushInfoLength);
-
- // Serialize flush metadata into the space after the command.
- // This closes the handle for writing.
- buffer->writeHandle->SerializeFlush(writeHandleSpace);
- buffer->writeHandle = nullptr;
-
- } else if (buffer->readHandle) {
- buffer->readHandle = nullptr;
- }
- buffer->ClearMapRequests(WGPUBufferMapAsyncStatus_Unknown);
-
- BufferUnmapCmd cmd;
- cmd.self = cBuffer;
- buffer->device->GetClient()->SerializeCommand(cmd);
- }
-
- void ClientHandwrittenBufferDestroy(WGPUBuffer cBuffer) {
- Buffer* buffer = reinterpret_cast<Buffer*>(cBuffer);
-
- // Cancel or remove all mappings
- buffer->writeHandle = nullptr;
- buffer->readHandle = nullptr;
- buffer->ClearMapRequests(WGPUBufferMapAsyncStatus_Unknown);
-
- BufferDestroyCmd cmd;
- cmd.self = cBuffer;
- buffer->device->GetClient()->SerializeCommand(cmd);
- }
-
- WGPUFence ClientHandwrittenQueueCreateFence(WGPUQueue cSelf,
- WGPUFenceDescriptor const* descriptor) {
- Queue* queue = reinterpret_cast<Queue*>(cSelf);
- Device* device = queue->device;
-
- QueueCreateFenceCmd cmd;
- cmd.self = cSelf;
- auto* allocation = device->GetClient()->FenceAllocator().New(device);
- cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
- cmd.descriptor = descriptor;
-
- device->GetClient()->SerializeCommand(cmd);
-
- WGPUFence cFence = reinterpret_cast<WGPUFence>(allocation->object.get());
-
- Fence* fence = reinterpret_cast<Fence*>(cFence);
- fence->queue = queue;
-
- uint64_t initialValue = descriptor != nullptr ? descriptor->initialValue : 0u;
- fence->signaledValue = initialValue;
- fence->completedValue = initialValue;
- return cFence;
- }
-
- void ClientHandwrittenQueueSignal(WGPUQueue cQueue, WGPUFence cFence, uint64_t signalValue) {
- Fence* fence = reinterpret_cast<Fence*>(cFence);
- Queue* queue = reinterpret_cast<Queue*>(cQueue);
- if (fence->queue != queue) {
- ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(fence->device),
- WGPUErrorType_Validation,
- "Fence must be signaled on the queue on which it was created.");
- return;
- }
- if (signalValue <= fence->signaledValue) {
- ClientDeviceInjectError(reinterpret_cast<WGPUDevice>(fence->device),
- WGPUErrorType_Validation,
- "Fence value less than or equal to signaled value");
- return;
- }
- fence->signaledValue = signalValue;
-
- QueueSignalCmd cmd;
- cmd.self = cQueue;
- cmd.fence = cFence;
- cmd.signalValue = signalValue;
-
- queue->device->GetClient()->SerializeCommand(cmd);
- }
-
- void ClientHandwrittenQueueWriteBuffer(WGPUQueue cQueue,
- WGPUBuffer cBuffer,
- uint64_t bufferOffset,
- const void* data,
- size_t size) {
- Queue* queue = reinterpret_cast<Queue*>(cQueue);
- Buffer* buffer = reinterpret_cast<Buffer*>(cBuffer);
-
- QueueWriteBufferInternalCmd cmd;
- cmd.queueId = queue->id;
- cmd.bufferId = buffer->id;
- cmd.bufferOffset = bufferOffset;
- cmd.data = static_cast<const uint8_t*>(data);
- cmd.size = size;
-
- queue->device->GetClient()->SerializeCommand(cmd);
- }
-
- void ClientDeviceReference(WGPUDevice) {
- }
-
- void ClientDeviceRelease(WGPUDevice) {
- }
-
- WGPUQueue ClientHandwrittenDeviceGetDefaultQueue(WGPUDevice cSelf) {
- Device* device = reinterpret_cast<Device*>(cSelf);
- return device->GetDefaultQueue();
- }
-
- void ClientHandwrittenDeviceSetUncapturedErrorCallback(WGPUDevice cSelf,
- WGPUErrorCallback callback,
- void* userdata) {
- Device* device = reinterpret_cast<Device*>(cSelf);
- device->SetUncapturedErrorCallback(callback, userdata);
- }
- void ClientHandwrittenDeviceSetDeviceLostCallback(WGPUDevice cSelf,
- WGPUDeviceLostCallback callback,
- void* userdata) {
- Device* device = reinterpret_cast<Device*>(cSelf);
- device->SetDeviceLostCallback(callback, userdata);
- }
-
-}} // namespace dawn_wire::client
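With this change the handwritten client procs above disappear in favor of methods on the wire-client objects (Buffer, Device, Fence, Queue in the diffs below). A minimal sketch of the thunk shape the generated ApiProcs can then take; the names FromAPI and ClientBufferUnmap here are illustrative stand-ins, not the generated code itself:

```cpp
// Sketch only: the generated proc casts the C handle to the wire-client
// object and forwards, instead of carrying a handwritten body.
struct WGPUBufferImpl;
typedef WGPUBufferImpl* WGPUBuffer;

namespace dawn_wire { namespace client {

    class Buffer {
      public:
        void Unmap() { /* serialize BufferUnmapCmd, clear map state, ... */ }
    };

    // Hypothetical cast helper; centralizing the reinterpret_casts keeps
    // every thunk down to one line.
    inline Buffer* FromAPI(WGPUBuffer obj) {
        return reinterpret_cast<Buffer*>(obj);
    }

    void ClientBufferUnmap(WGPUBuffer cSelf) {
        FromAPI(cSelf)->Unmap();
    }

}}  // namespace dawn_wire::client
```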
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
index 3548f5133fb..86f5944cc4a 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.cpp
@@ -14,8 +14,92 @@
#include "dawn_wire/client/Buffer.h"
+#include "dawn_wire/client/Client.h"
+#include "dawn_wire/client/Device.h"
+
namespace dawn_wire { namespace client {
+ // static
+ WGPUBuffer Buffer::Create(Device* device_, const WGPUBufferDescriptor* descriptor) {
+ Client* wireClient = device_->GetClient();
+
+ bool mappable =
+ (descriptor->usage & (WGPUBufferUsage_MapRead | WGPUBufferUsage_MapWrite)) != 0 ||
+ descriptor->mappedAtCreation;
+ if (mappable && descriptor->size > std::numeric_limits<size_t>::max()) {
+ device_->InjectError(WGPUErrorType_OutOfMemory, "Buffer is too large for map usage");
+ return device_->CreateErrorBuffer();
+ }
+
+ std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle = nullptr;
+ void* writeData = nullptr;
+ size_t writeHandleCreateInfoLength = 0;
+
+ // If the buffer is mapped at creation, create a write handle that will represent the
+ // mapping of the whole buffer.
+ if (descriptor->mappedAtCreation) {
+ // Create the handle.
+ writeHandle.reset(
+ wireClient->GetMemoryTransferService()->CreateWriteHandle(descriptor->size));
+ if (writeHandle == nullptr) {
+ device_->InjectError(WGPUErrorType_OutOfMemory, "Buffer mapping allocation failed");
+ return device_->CreateErrorBuffer();
+ }
+
+            // Open the handle; it may fail by returning a nullptr in writeData.
+ size_t writeDataLength = 0;
+ std::tie(writeData, writeDataLength) = writeHandle->Open();
+ if (writeData == nullptr) {
+ device_->InjectError(WGPUErrorType_OutOfMemory, "Buffer mapping allocation failed");
+ return device_->CreateErrorBuffer();
+ }
+ ASSERT(writeDataLength == descriptor->size);
+
+ // Get the serialization size of the write handle.
+ writeHandleCreateInfoLength = writeHandle->SerializeCreateSize();
+ }
+
+ // Create the buffer and send the creation command.
+ auto* bufferObjectAndSerial = wireClient->BufferAllocator().New(device_);
+ Buffer* buffer = bufferObjectAndSerial->object.get();
+ buffer->mSize = descriptor->size;
+
+ DeviceCreateBufferCmd cmd;
+ cmd.device = ToAPI(device_);
+ cmd.descriptor = descriptor;
+ cmd.result = ObjectHandle{buffer->id, bufferObjectAndSerial->generation};
+ cmd.handleCreateInfoLength = writeHandleCreateInfoLength;
+ cmd.handleCreateInfo = nullptr;
+
+ char* writeHandleSpace = wireClient->SerializeCommand(cmd, writeHandleCreateInfoLength);
+
+ if (descriptor->mappedAtCreation) {
+ // Serialize the WriteHandle into the space after the command.
+ writeHandle->SerializeCreate(writeHandleSpace);
+
+ // Set the buffer state for the mapping at creation. The buffer now owns the write
+            // handle.
+ buffer->mWriteHandle = std::move(writeHandle);
+ buffer->mMappedData = writeData;
+ buffer->mMapOffset = 0;
+ buffer->mMapSize = buffer->mSize;
+ }
+
+ return ToAPI(buffer);
+ }
+
+ // static
+ WGPUBuffer Buffer::CreateError(Device* device_) {
+ auto* allocation = device_->GetClient()->BufferAllocator().New(device_);
+
+ DeviceCreateErrorBufferCmd cmd;
+ cmd.self = ToAPI(device_);
+ cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
+ device_->GetClient()->SerializeCommand(cmd);
+
+ return ToAPI(allocation->object.get());
+ }
+
Buffer::~Buffer() {
// Callbacks need to be fired in all cases, as they can handle freeing resources
// so we call them with "Unknown" status.
@@ -23,14 +107,330 @@ namespace dawn_wire { namespace client {
}
void Buffer::ClearMapRequests(WGPUBufferMapAsyncStatus status) {
- for (auto& it : requests) {
- if (it.second.writeHandle) {
- it.second.writeCallback(status, nullptr, 0, it.second.userdata);
+ for (auto& it : mRequests) {
+ if (it.second.callback) {
+ it.second.callback(status, it.second.userdata);
+ }
+ }
+ mRequests.clear();
+ }
+
+ void Buffer::MapReadAsync(WGPUBufferMapReadCallback callback, void* userdata) {
+ struct ProxyData {
+ WGPUBufferMapReadCallback callback;
+ void* userdata;
+ Buffer* self;
+ };
+ ProxyData* proxy = new ProxyData;
+ proxy->callback = callback;
+ proxy->userdata = userdata;
+ proxy->self = this;
+
+ MapAsync(
+ WGPUMapMode_Read, 0, mSize,
+ [](WGPUBufferMapAsyncStatus status, void* userdata) {
+ ProxyData* proxy = static_cast<ProxyData*>(userdata);
+ Buffer* self = proxy->self;
+
+ if (status == WGPUBufferMapAsyncStatus_Success) {
+                    // On buffer creation we assert that a mappable buffer cannot be bigger than
+                    // SIZE_MAX, so a successful mapping implies the size fits in size_t.
+ ASSERT(self->mSize <= std::numeric_limits<size_t>::max());
+ self->mMapOffset = 0;
+ self->mMapSize = self->mSize;
+ }
+
+ if (proxy->callback) {
+ const void* data = self->GetConstMappedRange(0, self->mSize);
+ uint64_t dataLength = data == nullptr ? 0 : self->mSize;
+ proxy->callback(status, data, dataLength, proxy->userdata);
+ }
+
+ delete proxy;
+ },
+ proxy);
+ }
+
+ void Buffer::MapWriteAsync(WGPUBufferMapWriteCallback callback, void* userdata) {
+ struct ProxyData {
+ WGPUBufferMapWriteCallback callback;
+ void* userdata;
+ Buffer* self;
+ };
+ ProxyData* proxy = new ProxyData;
+ proxy->callback = callback;
+ proxy->userdata = userdata;
+ proxy->self = this;
+
+ MapAsync(
+ WGPUMapMode_Write, 0, mSize,
+ [](WGPUBufferMapAsyncStatus status, void* userdata) {
+ ProxyData* proxy = static_cast<ProxyData*>(userdata);
+ Buffer* self = proxy->self;
+
+ if (status == WGPUBufferMapAsyncStatus_Success) {
+                    // On buffer creation we assert that a mappable buffer cannot be bigger than
+                    // SIZE_MAX, so a successful mapping implies the size fits in size_t.
+ ASSERT(self->mSize <= std::numeric_limits<size_t>::max());
+ self->mMapOffset = 0;
+ self->mMapSize = self->mSize;
+ }
+
+ if (proxy->callback) {
+ void* data = self->GetMappedRange(0, self->mSize);
+ uint64_t dataLength = data == nullptr ? 0 : self->mSize;
+ proxy->callback(status, data, dataLength, proxy->userdata);
+ }
+
+ delete proxy;
+ },
+ proxy);
+ }
+
+ void Buffer::MapAsync(WGPUMapModeFlags mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata) {
+ // Handle the defaulting of size required by WebGPU.
+ if (size == 0 && offset < mSize) {
+ size = mSize - offset;
+ }
+
+ bool isReadMode = mode & WGPUMapMode_Read;
+ bool isWriteMode = mode & WGPUMapMode_Write;
+
+        // Step 1. Do early validation that exactly one of READ or WRITE is set, because the
+        // server rejects any other mode combination.
+        if (!(isReadMode ^ isWriteMode)) {
+            device->InjectError(WGPUErrorType_Validation,
+                                "MapAsync mode must be exactly one of Read or Write");
+ if (callback != nullptr) {
+ callback(WGPUBufferMapAsyncStatus_Error, userdata);
+ }
+ return;
+ }
+
+ // Step 2. Create the request structure that will hold information while this mapping is
+ // in flight.
+ uint32_t serial = mRequestSerial++;
+ ASSERT(mRequests.find(serial) == mRequests.end());
+
+ Buffer::MapRequestData request = {};
+ request.callback = callback;
+ request.userdata = userdata;
+ request.size = size;
+ request.offset = offset;
+
+        // Step 2a. Create the read/write handles for this request.
+ if (isReadMode) {
+ request.readHandle.reset(
+ device->GetClient()->GetMemoryTransferService()->CreateReadHandle(size));
+ if (request.readHandle == nullptr) {
+ device->InjectError(WGPUErrorType_OutOfMemory, "Failed to create buffer mapping");
+ callback(WGPUBufferMapAsyncStatus_Error, userdata);
+ return;
+ }
+ } else {
+ ASSERT(isWriteMode);
+ request.writeHandle.reset(
+ device->GetClient()->GetMemoryTransferService()->CreateWriteHandle(size));
+ if (request.writeHandle == nullptr) {
+ device->InjectError(WGPUErrorType_OutOfMemory, "Failed to create buffer mapping");
+ callback(WGPUBufferMapAsyncStatus_Error, userdata);
+ return;
+ }
+ }
+
+ // Step 3. Serialize the command to send to the server.
+ BufferMapAsyncCmd cmd;
+ cmd.bufferId = this->id;
+ cmd.requestSerial = serial;
+ cmd.mode = mode;
+ cmd.offset = offset;
+ cmd.size = size;
+ cmd.handleCreateInfo = nullptr;
+
+ // Step 3a. Fill the handle create info in the command.
+ if (isReadMode) {
+ cmd.handleCreateInfoLength = request.readHandle->SerializeCreateSize();
+ char* handleCreateInfoSpace =
+ device->GetClient()->SerializeCommand(cmd, cmd.handleCreateInfoLength);
+ request.readHandle->SerializeCreate(handleCreateInfoSpace);
+ } else {
+ ASSERT(isWriteMode);
+ cmd.handleCreateInfoLength = request.writeHandle->SerializeCreateSize();
+ char* handleCreateInfoSpace =
+ device->GetClient()->SerializeCommand(cmd, cmd.handleCreateInfoLength);
+ request.writeHandle->SerializeCreate(handleCreateInfoSpace);
+ }
+
+ // Step 4. Register this request so that we can retrieve it from its serial when the server
+ // sends the callback.
+ mRequests[serial] = std::move(request);
+ }
+
+ bool Buffer::OnMapAsyncCallback(uint32_t requestSerial,
+ uint32_t status,
+ uint64_t readInitialDataInfoLength,
+ const uint8_t* readInitialDataInfo) {
+        // The request may have been deleted via an Unmap, so this isn't an error.
+ auto requestIt = mRequests.find(requestSerial);
+ if (requestIt == mRequests.end()) {
+ return true;
+ }
+
+ auto request = std::move(requestIt->second);
+        // Delete the request before calling the callback; otherwise the callback could be fired
+        // a second time if, for example, buffer.Unmap() is called inside the callback.
+ mRequests.erase(requestIt);
+
+ auto FailRequest = [&request]() -> bool {
+ if (request.callback != nullptr) {
+ request.callback(WGPUBufferMapAsyncStatus_DeviceLost, request.userdata);
+ }
+ return false;
+ };
+
+ bool isRead = request.readHandle != nullptr;
+ bool isWrite = request.writeHandle != nullptr;
+ ASSERT(isRead != isWrite);
+
+ size_t mappedDataLength = 0;
+ const void* mappedData = nullptr;
+ if (status == WGPUBufferMapAsyncStatus_Success) {
+ if (mReadHandle || mWriteHandle) {
+ // Buffer is already mapped.
+ return FailRequest();
+ }
+
+ if (isRead) {
+ if (readInitialDataInfoLength > std::numeric_limits<size_t>::max()) {
+ // This is the size of data deserialized from the command stream, which must be
+ // CPU-addressable.
+ return FailRequest();
+ }
+
+ // The server serializes metadata to initialize the contents of the ReadHandle.
+ // Deserialize the message and return a pointer and size of the mapped data for
+ // reading.
+ if (!request.readHandle->DeserializeInitialData(
+ readInitialDataInfo, static_cast<size_t>(readInitialDataInfoLength),
+ &mappedData, &mappedDataLength)) {
+ // Deserialization shouldn't fail. This is a fatal error.
+ return FailRequest();
+ }
+ ASSERT(mappedData != nullptr);
+
} else {
- it.second.readCallback(status, nullptr, 0, it.second.userdata);
+ // Open the WriteHandle. This returns a pointer and size of mapped memory.
+ // On failure, |mappedData| may be null.
+ std::tie(mappedData, mappedDataLength) = request.writeHandle->Open();
+
+ if (mappedData == nullptr) {
+ return FailRequest();
+ }
}
+
+ // The MapAsync request was successful. The buffer now owns the Read/Write handles
+ // until Unmap().
+ mReadHandle = std::move(request.readHandle);
+ mWriteHandle = std::move(request.writeHandle);
+ }
+
+ mMapOffset = request.offset;
+ mMapSize = request.size;
+ mMappedData = const_cast<void*>(mappedData);
+ if (request.callback) {
+ request.callback(static_cast<WGPUBufferMapAsyncStatus>(status), request.userdata);
+ }
+
+ return true;
+ }
+
+ void* Buffer::GetMappedRange(size_t offset, size_t size) {
+ if (!IsMappedForWriting() || !CheckGetMappedRangeOffsetSize(offset, size)) {
+ return nullptr;
}
- requests.clear();
+ return static_cast<uint8_t*>(mMappedData) + (offset - mMapOffset);
}
+ const void* Buffer::GetConstMappedRange(size_t offset, size_t size) {
+ if (!(IsMappedForWriting() || IsMappedForReading()) ||
+ !CheckGetMappedRangeOffsetSize(offset, size)) {
+ return nullptr;
+ }
+ return static_cast<uint8_t*>(mMappedData) + (offset - mMapOffset);
+ }
+
+ void Buffer::Unmap() {
+ // Invalidate the local pointer, and cancel all other in-flight requests that would
+        // turn into errors anyway (you can't double map). This prevents a race in which the
+        // application code would have unmapped a buffer but would still receive a callback, as in
+        // the following sequence:
+ // - Client -> Server: MapRequest1, Unmap, MapRequest2
+ // - Server -> Client: Result of MapRequest1
+ // - Unmap locally on the client
+ // - Server -> Client: Result of MapRequest2
+ if (mWriteHandle) {
+ // Writes need to be flushed before Unmap is sent. Unmap calls all associated
+ // in-flight callbacks which may read the updated data.
+ ASSERT(mReadHandle == nullptr);
+
+ // Get the serialization size of metadata to flush writes.
+ size_t writeFlushInfoLength = mWriteHandle->SerializeFlushSize();
+
+ BufferUpdateMappedDataCmd cmd;
+ cmd.bufferId = id;
+ cmd.writeFlushInfoLength = writeFlushInfoLength;
+ cmd.writeFlushInfo = nullptr;
+
+ char* writeHandleSpace =
+ device->GetClient()->SerializeCommand(cmd, writeFlushInfoLength);
+
+ // Serialize flush metadata into the space after the command.
+ // This closes the handle for writing.
+ mWriteHandle->SerializeFlush(writeHandleSpace);
+ mWriteHandle = nullptr;
+
+ } else if (mReadHandle) {
+ mReadHandle = nullptr;
+ }
+
+ mMappedData = nullptr;
+ mMapOffset = 0;
+ mMapSize = 0;
+ ClearMapRequests(WGPUBufferMapAsyncStatus_Unknown);
+
+ BufferUnmapCmd cmd;
+ cmd.self = ToAPI(this);
+ device->GetClient()->SerializeCommand(cmd);
+ }
+
+ void Buffer::Destroy() {
+ // Cancel or remove all mappings
+ mWriteHandle = nullptr;
+ mReadHandle = nullptr;
+ mMappedData = nullptr;
+ ClearMapRequests(WGPUBufferMapAsyncStatus_Unknown);
+
+ BufferDestroyCmd cmd;
+ cmd.self = ToAPI(this);
+ device->GetClient()->SerializeCommand(cmd);
+ }
+
+ bool Buffer::IsMappedForReading() const {
+ return mReadHandle != nullptr;
+ }
+
+ bool Buffer::IsMappedForWriting() const {
+ return mWriteHandle != nullptr;
+ }
+
+ bool Buffer::CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const {
+ if (size > mMapSize || offset < mMapOffset) {
+ return false;
+ }
+
+ size_t offsetInMappedRange = offset - mMapOffset;
+ return offsetInMappedRange <= mMapSize - size;
+ }
}} // namespace dawn_wire::client
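CheckGetMappedRangeOffsetSize above is deliberately written with guarded subtractions: the obvious `offset + size <= mMapOffset + mMapSize` form can wrap around on size_t. A standalone illustration (not part of the diff) of the difference:

```cpp
// Why the range check is rearranged: the naive form can overflow, the
// guarded form cannot.
#include <cassert>
#include <cstddef>
#include <limits>

bool NaiveCheck(size_t offset, size_t size, size_t mapOffset, size_t mapSize) {
    // offset + size may wrap past zero, wrongly accepting huge ranges.
    return offset >= mapOffset && offset + size <= mapOffset + mapSize;
}

bool SafeCheck(size_t offset, size_t size, size_t mapOffset, size_t mapSize) {
    // Mirrors the diff: every subtraction below is guarded, so nothing wraps.
    if (size > mapSize || offset < mapOffset) {
        return false;
    }
    size_t offsetInMappedRange = offset - mapOffset;
    return offsetInMappedRange <= mapSize - size;
}

int main() {
    const size_t kMax = std::numeric_limits<size_t>::max();
    // A request whose end wraps around: 16 + kMax == 15 in size_t arithmetic.
    assert(NaiveCheck(16, kMax, 0, 256));   // wrong: accepted due to wraparound
    assert(!SafeCheck(16, kMax, 0, 256));   // right: rejected
    assert(SafeCheck(16, 16, 0, 256));      // an in-range request still passes
    return 0;
}
```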
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h
index 09da8e34977..5c32c751fc7 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Buffer.h
@@ -24,33 +24,62 @@
namespace dawn_wire { namespace client {
- struct Buffer : ObjectBase {
+ class Buffer : public ObjectBase {
+ public:
using ObjectBase::ObjectBase;
+ static WGPUBuffer Create(Device* device, const WGPUBufferDescriptor* descriptor);
+ static WGPUBuffer CreateError(Device* device);
+
~Buffer();
void ClearMapRequests(WGPUBufferMapAsyncStatus status);
+ void MapReadAsync(WGPUBufferMapReadCallback callback, void* userdata);
+ void MapWriteAsync(WGPUBufferMapWriteCallback callback, void* userdata);
+ bool OnMapAsyncCallback(uint32_t requestSerial,
+ uint32_t status,
+ uint64_t readInitialDataInfoLength,
+ const uint8_t* readInitialDataInfo);
+ void MapAsync(WGPUMapModeFlags mode,
+ size_t offset,
+ size_t size,
+ WGPUBufferMapCallback callback,
+ void* userdata);
+ void* GetMappedRange(size_t offset, size_t size);
+ const void* GetConstMappedRange(size_t offset, size_t size);
+ void Unmap();
+
+ void Destroy();
+
+ private:
+ bool IsMappedForReading() const;
+ bool IsMappedForWriting() const;
+ bool CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const;
+
        // We want to defer all the validation to the server, which means we could have multiple
        // map requests in flight at a time and need to track them separately.
        // In well-behaved applications, only one request should exist at a time.
struct MapRequestData {
- // TODO(enga): Use a tagged pointer to save space.
- WGPUBufferMapReadCallback readCallback = nullptr;
- WGPUBufferMapWriteCallback writeCallback = nullptr;
+ WGPUBufferMapCallback callback = nullptr;
void* userdata = nullptr;
+ size_t offset = 0;
+ size_t size = 0;
// TODO(enga): Use a tagged pointer to save space.
std::unique_ptr<MemoryTransferService::ReadHandle> readHandle = nullptr;
std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle = nullptr;
};
- std::map<uint32_t, MapRequestData> requests;
- uint32_t requestSerial = 0;
- uint64_t size = 0;
+ std::map<uint32_t, MapRequestData> mRequests;
+ uint32_t mRequestSerial = 0;
+ uint64_t mSize = 0;
// Only one mapped pointer can be active at a time because Unmap clears all the in-flight
// requests.
// TODO(enga): Use a tagged pointer to save space.
- std::unique_ptr<MemoryTransferService::ReadHandle> readHandle = nullptr;
- std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle = nullptr;
+ std::unique_ptr<MemoryTransferService::ReadHandle> mReadHandle = nullptr;
+ std::unique_ptr<MemoryTransferService::WriteHandle> mWriteHandle = nullptr;
+ void* mMappedData = nullptr;
+ size_t mMapOffset = 0;
+ size_t mMapSize = 0;
};
}} // namespace dawn_wire::client
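Taken together, Buffer::Create, GetMappedRange, and Unmap implement the mappedAtCreation path end to end on the wire client. From an application's point of view the flow looks roughly like this sketch (device acquisition and error handling omitted):

```cpp
// Application-side sketch of the mappedAtCreation path the wire client now
// implements. Assumes a WGPUDevice obtained elsewhere.
#include <cstddef>
#include <cstring>
#include <dawn/webgpu.h>

WGPUBuffer CreateInitializedBuffer(WGPUDevice device, const void* src, size_t size) {
    WGPUBufferDescriptor desc = {};
    desc.size = size;
    desc.usage = WGPUBufferUsage_Vertex | WGPUBufferUsage_CopyDst;
    desc.mappedAtCreation = true;  // Buffer::Create opens a WriteHandle for the whole buffer.

    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &desc);

    // GetMappedRange returns the client-side staging pointer backed by the WriteHandle.
    void* dst = wgpuBufferGetMappedRange(buffer, 0, size);
    if (dst != nullptr) {
        memcpy(dst, src, size);
    }

    // Unmap flushes the writes (BufferUpdateMappedDataCmd) before BufferUnmapCmd.
    wgpuBufferUnmap(buffer);
    return buffer;
}
```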
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
index 1953347f44c..6b5b7be7ea2 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Client.cpp
@@ -20,9 +20,7 @@
namespace dawn_wire { namespace client {
Client::Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService)
- : ClientBase(),
- mSerializer(serializer),
- mMemoryTransferService(memoryTransferService) {
+ : ClientBase(), mSerializer(serializer), mMemoryTransferService(memoryTransferService) {
if (mMemoryTransferService == nullptr) {
// If a MemoryTransferService is not provided, fall back to inline memory.
mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
@@ -44,11 +42,11 @@ namespace dawn_wire { namespace client {
}
ReservedTexture Client::ReserveTexture(WGPUDevice cDevice) {
- Device* device = reinterpret_cast<Device*>(cDevice);
+ Device* device = FromAPI(cDevice);
ObjectAllocator<Texture>::ObjectAndSerial* allocation = TextureAllocator().New(device);
ReservedTexture result;
- result.texture = reinterpret_cast<WGPUTexture>(allocation->object.get());
+ result.texture = ToAPI(allocation->object.get());
result.id = allocation->object->id;
result.generation = allocation->generation;
return result;
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp b/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
index dd904068692..7fdabc08052 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/ClientDoers.cpp
@@ -43,144 +43,21 @@ namespace dawn_wire { namespace client {
bool Client::DoDevicePopErrorScopeCallback(uint64_t requestSerial,
WGPUErrorType errorType,
const char* message) {
- return mDevice->PopErrorScope(requestSerial, errorType, message);
+ return mDevice->OnPopErrorScopeCallback(requestSerial, errorType, message);
}
- bool Client::DoBufferMapReadAsyncCallback(Buffer* buffer,
- uint32_t requestSerial,
- uint32_t status,
- uint64_t initialDataInfoLength,
- const uint8_t* initialDataInfo) {
+ bool Client::DoBufferMapAsyncCallback(Buffer* buffer,
+ uint32_t requestSerial,
+ uint32_t status,
+ uint64_t readInitialDataInfoLength,
+ const uint8_t* readInitialDataInfo) {
// The buffer might have been deleted or recreated so this isn't an error.
if (buffer == nullptr) {
return true;
}
- // The requests can have been deleted via an Unmap so this isn't an error.
- auto requestIt = buffer->requests.find(requestSerial);
- if (requestIt == buffer->requests.end()) {
- return true;
- }
-
- auto request = std::move(requestIt->second);
- // Delete the request before calling the callback otherwise the callback could be fired a
- // second time. If, for example, buffer.Unmap() is called inside the callback.
- buffer->requests.erase(requestIt);
-
- const void* mappedData = nullptr;
- size_t mappedDataLength = 0;
-
- auto GetMappedData = [&]() -> bool {
- // It is an error for the server to call the read callback when we asked for a map write
- if (request.writeHandle) {
- return false;
- }
-
- if (status == WGPUBufferMapAsyncStatus_Success) {
- if (buffer->readHandle || buffer->writeHandle) {
- // Buffer is already mapped.
- return false;
- }
- if (initialDataInfoLength > std::numeric_limits<size_t>::max()) {
- // This is the size of data deserialized from the command stream, which must be
- // CPU-addressable.
- return false;
- }
- ASSERT(request.readHandle != nullptr);
-
- // The server serializes metadata to initialize the contents of the ReadHandle.
- // Deserialize the message and return a pointer and size of the mapped data for
- // reading.
- if (!request.readHandle->DeserializeInitialData(
- initialDataInfo, static_cast<size_t>(initialDataInfoLength), &mappedData,
- &mappedDataLength)) {
- // Deserialization shouldn't fail. This is a fatal error.
- return false;
- }
- ASSERT(mappedData != nullptr);
-
- // The MapRead request was successful. The buffer now owns the ReadHandle until
- // Unmap().
- buffer->readHandle = std::move(request.readHandle);
- }
-
- return true;
- };
-
- if (!GetMappedData()) {
- // Dawn promises that all callbacks are called in finite time. Even if a fatal error
- // occurs, the callback is called.
- request.readCallback(WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0, request.userdata);
- return false;
- } else {
- request.readCallback(static_cast<WGPUBufferMapAsyncStatus>(status), mappedData,
- static_cast<uint64_t>(mappedDataLength), request.userdata);
- return true;
- }
- }
-
- bool Client::DoBufferMapWriteAsyncCallback(Buffer* buffer,
- uint32_t requestSerial,
- uint32_t status) {
- // The buffer might have been deleted or recreated so this isn't an error.
- if (buffer == nullptr) {
- return true;
- }
-
- // The requests can have been deleted via an Unmap so this isn't an error.
- auto requestIt = buffer->requests.find(requestSerial);
- if (requestIt == buffer->requests.end()) {
- return true;
- }
-
- auto request = std::move(requestIt->second);
- // Delete the request before calling the callback otherwise the callback could be fired a
- // second time. If, for example, buffer.Unmap() is called inside the callback.
- buffer->requests.erase(requestIt);
-
- void* mappedData = nullptr;
- size_t mappedDataLength = 0;
-
- auto GetMappedData = [&]() -> bool {
- // It is an error for the server to call the write callback when we asked for a map read
- if (request.readHandle) {
- return false;
- }
-
- if (status == WGPUBufferMapAsyncStatus_Success) {
- if (buffer->readHandle || buffer->writeHandle) {
- // Buffer is already mapped.
- return false;
- }
- ASSERT(request.writeHandle != nullptr);
-
- // Open the WriteHandle. This returns a pointer and size of mapped memory.
- // On failure, |mappedData| may be null.
- std::tie(mappedData, mappedDataLength) = request.writeHandle->Open();
-
- if (mappedData == nullptr) {
- return false;
- }
-
- // The MapWrite request was successful. The buffer now owns the WriteHandle until
- // Unmap().
- buffer->writeHandle = std::move(request.writeHandle);
- }
-
- return true;
- };
-
- if (!GetMappedData()) {
- // Dawn promises that all callbacks are called in finite time. Even if a fatal error
- // occurs, the callback is called.
- request.writeCallback(WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0,
- request.userdata);
- return false;
- } else {
- request.writeCallback(static_cast<WGPUBufferMapAsyncStatus>(status), mappedData,
- static_cast<uint64_t>(mappedDataLength), request.userdata);
- return true;
- }
+ return buffer->OnMapAsyncCallback(requestSerial, status, readInitialDataInfoLength,
+ readInitialDataInfo);
}
bool Client::DoFenceUpdateCompletedValue(Fence* fence, uint64_t value) {
@@ -189,8 +66,7 @@ namespace dawn_wire { namespace client {
return true;
}
- fence->completedValue = value;
- fence->CheckPassedFences();
+ fence->OnUpdateCompletedValueCallback(value);
return true;
}
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
index 43361b3e625..addc9773599 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Device.cpp
@@ -31,7 +31,7 @@ namespace dawn_wire { namespace client {
mDefaultQueue = allocation->object.get();
DeviceGetDefaultQueueCmd cmd;
- cmd.self = reinterpret_cast<WGPUDevice>(this);
+ cmd.self = ToAPI(this);
cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
mClient->SerializeCommand(cmd);
@@ -85,13 +85,13 @@ namespace dawn_wire { namespace client {
mErrorScopeStackSize++;
DevicePushErrorScopeCmd cmd;
- cmd.self = reinterpret_cast<WGPUDevice>(this);
+ cmd.self = ToAPI(this);
cmd.filter = filter;
mClient->SerializeCommand(cmd);
}
- bool Device::RequestPopErrorScope(WGPUErrorCallback callback, void* userdata) {
+ bool Device::PopErrorScope(WGPUErrorCallback callback, void* userdata) {
if (mErrorScopeStackSize == 0) {
return false;
}
@@ -103,7 +103,7 @@ namespace dawn_wire { namespace client {
mErrorScopes[serial] = {callback, userdata};
DevicePopErrorScopeCmd cmd;
- cmd.device = reinterpret_cast<WGPUDevice>(this);
+ cmd.device = ToAPI(this);
cmd.requestSerial = serial;
mClient->SerializeCommand(cmd);
@@ -111,7 +111,9 @@ namespace dawn_wire { namespace client {
return true;
}
- bool Device::PopErrorScope(uint64_t requestSerial, WGPUErrorType type, const char* message) {
+ bool Device::OnPopErrorScopeCallback(uint64_t requestSerial,
+ WGPUErrorType type,
+ const char* message) {
switch (type) {
case WGPUErrorType_NoError:
case WGPUErrorType_Validation:
@@ -135,9 +137,38 @@ namespace dawn_wire { namespace client {
return true;
}
+ void Device::InjectError(WGPUErrorType type, const char* message) {
+ DeviceInjectErrorCmd cmd;
+ cmd.self = ToAPI(this);
+ cmd.type = type;
+ cmd.message = message;
+ mClient->SerializeCommand(cmd);
+ }
+
+ WGPUBuffer Device::CreateBuffer(const WGPUBufferDescriptor* descriptor) {
+ return Buffer::Create(this, descriptor);
+ }
+
+ WGPUCreateBufferMappedResult Device::CreateBufferMapped(
+ const WGPUBufferDescriptor* descriptor) {
+ WGPUBufferDescriptor descMappedAtCreation = *descriptor;
+ descMappedAtCreation.mappedAtCreation = true;
+
+ WGPUCreateBufferMappedResult result;
+ result.buffer = CreateBuffer(&descMappedAtCreation);
+ result.data = FromAPI(result.buffer)->GetMappedRange(0, descriptor->size);
+ result.dataLength = result.data == nullptr ? 0 : descriptor->size;
+
+ return result;
+ }
+
+ WGPUBuffer Device::CreateErrorBuffer() {
+ return Buffer::CreateError(this);
+ }
+
WGPUQueue Device::GetDefaultQueue() {
mDefaultQueue->refcount++;
- return reinterpret_cast<WGPUQueue>(mDefaultQueue);
+ return ToAPI(mDefaultQueue);
}
}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Device.h b/chromium/third_party/dawn/src/dawn_wire/client/Device.h
index f32259a76f7..2b554e34e89 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Device.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Device.h
@@ -24,7 +24,7 @@
namespace dawn_wire { namespace client {
class Client;
- struct Queue;
+ class Queue;
class Device : public ObjectBase {
public:
@@ -32,14 +32,20 @@ namespace dawn_wire { namespace client {
~Device();
Client* GetClient();
- void HandleError(WGPUErrorType errorType, const char* message);
- void HandleDeviceLost(const char* message);
void SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata);
void SetDeviceLostCallback(WGPUDeviceLostCallback errorCallback, void* errorUserdata);
-
+ void InjectError(WGPUErrorType type, const char* message);
void PushErrorScope(WGPUErrorFilter filter);
- bool RequestPopErrorScope(WGPUErrorCallback callback, void* userdata);
- bool PopErrorScope(uint64_t requestSerial, WGPUErrorType type, const char* message);
+ bool PopErrorScope(WGPUErrorCallback callback, void* userdata);
+ WGPUBuffer CreateBuffer(const WGPUBufferDescriptor* descriptor);
+ WGPUCreateBufferMappedResult CreateBufferMapped(const WGPUBufferDescriptor* descriptor);
+ WGPUBuffer CreateErrorBuffer();
+
+ void HandleError(WGPUErrorType errorType, const char* message);
+ void HandleDeviceLost(const char* message);
+ bool OnPopErrorScopeCallback(uint64_t requestSerial,
+ WGPUErrorType type,
+ const char* message);
WGPUQueue GetDefaultQueue();
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Fence.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Fence.cpp
index 607483e520f..d9e1fa7ed32 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Fence.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Fence.cpp
@@ -14,22 +14,74 @@
#include "dawn_wire/client/Fence.h"
+#include "dawn_wire/client/Device.h"
+
namespace dawn_wire { namespace client {
Fence::~Fence() {
// Callbacks need to be fired in all cases, as they can handle freeing resources
// so we call them with "Unknown" status.
- for (auto& request : requests.IterateAll()) {
+ for (auto& request : mRequests.IterateAll()) {
request.completionCallback(WGPUFenceCompletionStatus_Unknown, request.userdata);
}
- requests.Clear();
+ mRequests.Clear();
+ }
+
+ void Fence::Initialize(Queue* queue, const WGPUFenceDescriptor* descriptor) {
+ mQueue = queue;
+
+ uint64_t initialValue = descriptor != nullptr ? descriptor->initialValue : 0u;
+ mSignaledValue = initialValue;
+ mCompletedValue = initialValue;
}
void Fence::CheckPassedFences() {
- for (auto& request : requests.IterateUpTo(completedValue)) {
+ for (auto& request : mRequests.IterateUpTo(mCompletedValue)) {
request.completionCallback(WGPUFenceCompletionStatus_Success, request.userdata);
}
- requests.ClearUpTo(completedValue);
+ mRequests.ClearUpTo(mCompletedValue);
+ }
+
+ void Fence::OnCompletion(uint64_t value,
+ WGPUFenceOnCompletionCallback callback,
+ void* userdata) {
+ if (value > mSignaledValue) {
+ device->InjectError(WGPUErrorType_Validation,
+ "Value greater than fence signaled value");
+ callback(WGPUFenceCompletionStatus_Error, userdata);
+ return;
+ }
+
+ if (value <= mCompletedValue) {
+ callback(WGPUFenceCompletionStatus_Success, userdata);
+ return;
+ }
+
+ Fence::OnCompletionData request;
+ request.completionCallback = callback;
+ request.userdata = userdata;
+ mRequests.Enqueue(std::move(request), value);
+ }
+
+ void Fence::OnUpdateCompletedValueCallback(uint64_t value) {
+ mCompletedValue = value;
+ CheckPassedFences();
+ }
+
+ uint64_t Fence::GetCompletedValue() const {
+ return mCompletedValue;
+ }
+
+ uint64_t Fence::GetSignaledValue() const {
+ return mSignaledValue;
+ }
+
+ Queue* Fence::GetQueue() const {
+ return mQueue;
+ }
+
+ void Fence::SetSignaledValue(uint64_t value) {
+ mSignaledValue = value;
}
}} // namespace dawn_wire::client
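Fence now owns its completion bookkeeping: OnCompletion enqueues requests keyed by the awaited value, and OnUpdateCompletedValueCallback drains everything at or below the new completed value via CheckPassedFences. A standalone sketch of the same bookkeeping, with std::multimap standing in for Dawn's SerialMap (IterateUpTo / ClearUpTo):

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <map>

class FenceSketch {
  public:
    void OnCompletion(uint64_t value, std::function<void()> callback) {
        if (value <= mCompletedValue) {
            callback();  // Already complete: fire immediately, as in the diff.
            return;
        }
        mRequests.emplace(value, std::move(callback));
    }

    void OnUpdateCompletedValue(uint64_t value) {
        mCompletedValue = value;
        // Fire every request waiting on a value <= the new completed value.
        auto end = mRequests.upper_bound(value);
        for (auto it = mRequests.begin(); it != end; ++it) {
            it->second();
        }
        mRequests.erase(mRequests.begin(), end);
    }

  private:
    uint64_t mCompletedValue = 0;
    std::multimap<uint64_t, std::function<void()>> mRequests;
};

int main() {
    FenceSketch fence;
    fence.OnCompletion(2, [] { std::cout << "waited for 2\n"; });
    fence.OnCompletion(5, [] { std::cout << "waited for 5\n"; });
    fence.OnUpdateCompletedValue(3);  // fires only the value-2 request
    fence.OnUpdateCompletedValue(5);  // fires the value-5 request
    return 0;
}
```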
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Fence.h b/chromium/third_party/dawn/src/dawn_wire/client/Fence.h
index 4acde6d6837..107b4e7f440 100644
--- a/chromium/third_party/dawn/src/dawn_wire/client/Fence.h
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Fence.h
@@ -22,21 +22,33 @@
namespace dawn_wire { namespace client {
- struct Queue;
- struct Fence : ObjectBase {
+ class Queue;
+ class Fence : public ObjectBase {
+ public:
using ObjectBase::ObjectBase;
~Fence();
+ void Initialize(Queue* queue, const WGPUFenceDescriptor* descriptor);
+
void CheckPassedFences();
+ void OnCompletion(uint64_t value, WGPUFenceOnCompletionCallback callback, void* userdata);
+ void OnUpdateCompletedValueCallback(uint64_t value);
+
+ uint64_t GetCompletedValue() const;
+ uint64_t GetSignaledValue() const;
+ Queue* GetQueue() const;
+
+
+      private:
+ private:
struct OnCompletionData {
WGPUFenceOnCompletionCallback completionCallback = nullptr;
void* userdata = nullptr;
};
- Queue* queue = nullptr;
- uint64_t signaledValue = 0;
- uint64_t completedValue = 0;
- SerialMap<OnCompletionData> requests;
+ Queue* mQueue = nullptr;
+ uint64_t mSignaledValue = 0;
+ uint64_t mCompletedValue = 0;
+ SerialMap<OnCompletionData> mRequests;
};
}} // namespace dawn_wire::client
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp b/chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp
new file mode 100644
index 00000000000..ad116732ff3
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Queue.cpp
@@ -0,0 +1,91 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dawn_wire/client/Queue.h"
+
+#include "dawn_wire/client/Client.h"
+#include "dawn_wire/client/Device.h"
+
+namespace dawn_wire { namespace client {
+
+ WGPUFence Queue::CreateFence(WGPUFenceDescriptor const* descriptor) {
+ auto* allocation = device->GetClient()->FenceAllocator().New(device);
+
+ QueueCreateFenceCmd cmd;
+ cmd.self = ToAPI(this);
+ cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
+ cmd.descriptor = descriptor;
+ device->GetClient()->SerializeCommand(cmd);
+
+ Fence* fence = allocation->object.get();
+ fence->Initialize(this, descriptor);
+ return ToAPI(fence);
+ }
+
+ void Queue::Signal(WGPUFence cFence, uint64_t signalValue) {
+ Fence* fence = FromAPI(cFence);
+ if (fence->GetQueue() != this) {
+ device->InjectError(WGPUErrorType_Validation,
+ "Fence must be signaled on the queue on which it was created.");
+ return;
+ }
+ if (signalValue <= fence->GetSignaledValue()) {
+ device->InjectError(WGPUErrorType_Validation,
+ "Fence value less than or equal to signaled value");
+ return;
+ }
+
+ fence->SetSignaledValue(signalValue);
+
+ QueueSignalCmd cmd;
+ cmd.self = ToAPI(this);
+ cmd.fence = cFence;
+ cmd.signalValue = signalValue;
+
+ device->GetClient()->SerializeCommand(cmd);
+ }
+
+ void Queue::WriteBuffer(WGPUBuffer cBuffer,
+ uint64_t bufferOffset,
+ const void* data,
+ size_t size) {
+ Buffer* buffer = FromAPI(cBuffer);
+
+ QueueWriteBufferInternalCmd cmd;
+ cmd.queueId = id;
+ cmd.bufferId = buffer->id;
+ cmd.bufferOffset = bufferOffset;
+ cmd.data = static_cast<const uint8_t*>(data);
+ cmd.size = size;
+
+ device->GetClient()->SerializeCommand(cmd);
+ }
+
+ void Queue::WriteTexture(const WGPUTextureCopyView* destination,
+ const void* data,
+ size_t dataSize,
+ const WGPUTextureDataLayout* dataLayout,
+ const WGPUExtent3D* writeSize) {
+ QueueWriteTextureInternalCmd cmd;
+ cmd.queueId = id;
+ cmd.destination = destination;
+ cmd.data = static_cast<const uint8_t*>(data);
+ cmd.dataSize = dataSize;
+ cmd.dataLayout = dataLayout;
+ cmd.writeSize = writeSize;
+
+ device->GetClient()->SerializeCommand(cmd);
+ }
+
+}} // namespace dawn_wire::client
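Queue::Signal validates eagerly on the client and injects errors rather than waiting for a server round trip. A sketch of the rules as seen through the C API (the queues and device are assumed to exist; the call sites are illustrative):

```cpp
// Client-side validation rules Queue::Signal now enforces before anything
// reaches the server.
#include <dawn/webgpu.h>

void SignalExample(WGPUDevice device, WGPUQueue queue, WGPUQueue otherQueue) {
    WGPUFenceDescriptor desc = {};
    desc.initialValue = 1;  // Fence::Initialize seeds signaled == completed == 1.
    WGPUFence fence = wgpuQueueCreateFence(queue, &desc);

    wgpuQueueSignal(queue, fence, 2);       // OK: strictly increasing value.
    wgpuQueueSignal(queue, fence, 2);       // Injects a validation error: not increasing.
    wgpuQueueSignal(otherQueue, fence, 3);  // Injects a validation error: wrong queue.
}
```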
diff --git a/chromium/third_party/dawn/src/dawn_wire/client/Queue.h b/chromium/third_party/dawn/src/dawn_wire/client/Queue.h
new file mode 100644
index 00000000000..866bccde066
--- /dev/null
+++ b/chromium/third_party/dawn/src/dawn_wire/client/Queue.h
@@ -0,0 +1,43 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef DAWNWIRE_CLIENT_QUEUE_H_
+#define DAWNWIRE_CLIENT_QUEUE_H_
+
+#include <dawn/webgpu.h>
+
+#include "dawn_wire/WireClient.h"
+#include "dawn_wire/client/ObjectBase.h"
+
+#include <map>
+
+namespace dawn_wire { namespace client {
+
+ class Queue : public ObjectBase {
+ public:
+ using ObjectBase::ObjectBase;
+
+ WGPUFence CreateFence(const WGPUFenceDescriptor* descriptor);
+ void Signal(WGPUFence fence, uint64_t signalValue);
+ void WriteBuffer(WGPUBuffer cBuffer, uint64_t bufferOffset, const void* data, size_t size);
+ void WriteTexture(const WGPUTextureCopyView* destination,
+ const void* data,
+ size_t dataSize,
+ const WGPUTextureDataLayout* dataLayout,
+ const WGPUExtent3D* writeSize);
+ };
+
+}} // namespace dawn_wire::client
+
+#endif // DAWNWIRE_CLIENT_QUEUE_H_
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/Server.h b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
index 7dd0303df7c..8049e80ada1 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/Server.h
+++ b/chromium/third_party/dawn/src/dawn_wire/server/Server.h
@@ -25,8 +25,11 @@ namespace dawn_wire { namespace server {
struct MapUserdata {
Server* server;
ObjectHandle buffer;
+ WGPUBuffer bufferObj;
uint32_t requestSerial;
+ uint64_t offset;
uint64_t size;
+ WGPUMapModeFlags mode;
// TODO(enga): Use a tagged pointer to save space.
std::unique_ptr<MemoryTransferService::ReadHandle> readHandle = nullptr;
std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle = nullptr;
@@ -72,14 +75,7 @@ namespace dawn_wire { namespace server {
static void ForwardUncapturedError(WGPUErrorType type, const char* message, void* userdata);
static void ForwardDeviceLost(const char* message, void* userdata);
static void ForwardPopErrorScope(WGPUErrorType type, const char* message, void* userdata);
- static void ForwardBufferMapReadAsync(WGPUBufferMapAsyncStatus status,
- const void* ptr,
- uint64_t dataLength,
- void* userdata);
- static void ForwardBufferMapWriteAsync(WGPUBufferMapAsyncStatus status,
- void* ptr,
- uint64_t dataLength,
- void* userdata);
+ static void ForwardBufferMapAsync(WGPUBufferMapAsyncStatus status, void* userdata);
static void ForwardFenceCompletedValue(WGPUFenceCompletionStatus status, void* userdata);
// Error callbacks
@@ -88,14 +84,7 @@ namespace dawn_wire { namespace server {
void OnDevicePopErrorScope(WGPUErrorType type,
const char* message,
ErrorScopeUserdata* userdata);
- void OnBufferMapReadAsyncCallback(WGPUBufferMapAsyncStatus status,
- const void* ptr,
- uint64_t dataLength,
- MapUserdata* userdata);
- void OnBufferMapWriteAsyncCallback(WGPUBufferMapAsyncStatus status,
- void* ptr,
- uint64_t dataLength,
- MapUserdata* userdata);
+ void OnBufferMapAsyncCallback(WGPUBufferMapAsyncStatus status, MapUserdata* userdata);
void OnFenceCompletedValueUpdated(WGPUFenceCompletionStatus status,
FenceCompletionUserdata* userdata);
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp
index 1b516f96be5..f85ff154147 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerBuffer.cpp
@@ -46,7 +46,9 @@ namespace dawn_wire { namespace server {
bool Server::DoBufferMapAsync(ObjectId bufferId,
uint32_t requestSerial,
- bool isWrite,
+ WGPUMapModeFlags mode,
+ size_t offset,
+ size_t size,
uint64_t handleCreateInfoLength,
const uint8_t* handleCreateInfo) {
// These requests are just forwarded to the buffer, with userdata containing what the
@@ -62,6 +64,13 @@ namespace dawn_wire { namespace server {
return false;
}
+ // The server only knows how to deal with write XOR read. Validate that.
+ bool isReadMode = mode & WGPUMapMode_Read;
+ bool isWriteMode = mode & WGPUMapMode_Write;
+ if (!(isReadMode ^ isWriteMode)) {
+ return false;
+ }
+
if (handleCreateInfoLength > std::numeric_limits<size_t>::max()) {
// This is the size of data deserialized from the command stream, which must be
// CPU-addressable.
@@ -71,11 +80,15 @@ namespace dawn_wire { namespace server {
std::unique_ptr<MapUserdata> userdata = std::make_unique<MapUserdata>();
userdata->server = this;
userdata->buffer = ObjectHandle{bufferId, buffer->generation};
+ userdata->bufferObj = buffer->handle;
userdata->requestSerial = requestSerial;
+ userdata->offset = offset;
+ userdata->size = size;
+ userdata->mode = mode;
// The handle will point to the mapped memory or staging memory for the mapping.
// Store it on the map request.
- if (isWrite) {
+ if (isWriteMode) {
// Deserialize metadata produced from the client to create a companion server handle.
MemoryTransferService::WriteHandle* writeHandle = nullptr;
if (!mMemoryTransferService->DeserializeWriteHandle(
@@ -86,9 +99,8 @@ namespace dawn_wire { namespace server {
userdata->writeHandle =
std::unique_ptr<MemoryTransferService::WriteHandle>(writeHandle);
- mProcs.bufferMapWriteAsync(buffer->handle, ForwardBufferMapWriteAsync,
- userdata.release());
} else {
+ ASSERT(isReadMode);
// Deserialize metadata produced from the client to create a companion server handle.
MemoryTransferService::ReadHandle* readHandle = nullptr;
if (!mMemoryTransferService->DeserializeReadHandle(
@@ -98,76 +110,61 @@ namespace dawn_wire { namespace server {
ASSERT(readHandle != nullptr);
userdata->readHandle = std::unique_ptr<MemoryTransferService::ReadHandle>(readHandle);
- mProcs.bufferMapReadAsync(buffer->handle, ForwardBufferMapReadAsync,
- userdata.release());
}
+ mProcs.bufferMapAsync(buffer->handle, mode, offset, size, ForwardBufferMapAsync,
+ userdata.release());
+
return true;
}
- bool Server::DoDeviceCreateBufferMapped(WGPUDevice device,
- const WGPUBufferDescriptor* descriptor,
- ObjectHandle bufferResult,
- uint64_t handleCreateInfoLength,
- const uint8_t* handleCreateInfo) {
- if (handleCreateInfoLength > std::numeric_limits<size_t>::max()) {
- // This is the size of data deserialized from the command stream, which must be
- // CPU-addressable.
- return false;
- }
-
+ bool Server::DoDeviceCreateBuffer(WGPUDevice device,
+ const WGPUBufferDescriptor* descriptor,
+ ObjectHandle bufferResult,
+ uint64_t handleCreateInfoLength,
+ const uint8_t* handleCreateInfo) {
+ // Create and register the buffer object.
auto* resultData = BufferObjects().Allocate(bufferResult.id);
if (resultData == nullptr) {
return false;
}
resultData->generation = bufferResult.generation;
+ resultData->handle = mProcs.deviceCreateBuffer(device, descriptor);
- WGPUCreateBufferMappedResult result = mProcs.deviceCreateBufferMapped(device, descriptor);
- ASSERT(result.buffer != nullptr);
- if (result.data == nullptr && result.dataLength != 0) {
- // Non-zero dataLength but null data is used to indicate an allocation error.
- // Don't return false because this is not fatal. result.buffer is an ErrorBuffer
- // and subsequent operations will be errors.
- // This should only happen when fuzzing with the Null backend.
- resultData->mapWriteState = BufferMapWriteState::MapError;
- } else {
- // Deserialize metadata produced from the client to create a companion server handle.
- MemoryTransferService::WriteHandle* writeHandle = nullptr;
- if (!mMemoryTransferService->DeserializeWriteHandle(
- handleCreateInfo, static_cast<size_t>(handleCreateInfoLength), &writeHandle)) {
- return false;
- }
- ASSERT(writeHandle != nullptr);
-
- // Set the target of the WriteHandle to the mapped GPU memory.
- writeHandle->SetTarget(result.data, result.dataLength);
-
- // The buffer is mapped and has a valid mappedData pointer.
- // The buffer may still be an error with fake staging data.
- resultData->mapWriteState = BufferMapWriteState::Mapped;
- resultData->writeHandle =
- std::unique_ptr<MemoryTransferService::WriteHandle>(writeHandle);
+        // If the buffer isn't mapped at creation, we are done; the client must not have
+        // sent any handle data.
+ if (!descriptor->mappedAtCreation) {
+ return handleCreateInfoLength == 0;
}
- resultData->handle = result.buffer;
- return true;
- }
-
- bool Server::DoBufferSetSubDataInternal(ObjectId bufferId,
- uint64_t start,
- uint64_t offset,
- const uint8_t* data) {
- // The null object isn't valid as `self`
- if (bufferId == 0) {
+ // This is the size of data deserialized from the command stream to create the write handle,
+ // which must be CPU-addressable.
+ if (handleCreateInfoLength > std::numeric_limits<size_t>::max()) {
return false;
}
- auto* buffer = BufferObjects().Get(bufferId);
- if (buffer == nullptr) {
+ void* mapping = mProcs.bufferGetMappedRange(resultData->handle, 0, descriptor->size);
+ if (mapping == nullptr) {
+            // A null mapping indicates an allocation error or an error buffer. This is a valid
+            // case and isn't fatal. Record that the buffer is in the MapError state so subsequent
+            // mapping operations are skipped.
+ resultData->mapWriteState = BufferMapWriteState::MapError;
+ return true;
+ }
+
+ // Deserialize metadata produced from the client to create a companion server handle.
+ MemoryTransferService::WriteHandle* writeHandle = nullptr;
+ if (!mMemoryTransferService->DeserializeWriteHandle(
+ handleCreateInfo, static_cast<size_t>(handleCreateInfoLength), &writeHandle)) {
return false;
}
- mProcs.bufferSetSubData(buffer->handle, start, offset, data);
+ // Set the target of the WriteHandle to the mapped GPU memory.
+ ASSERT(writeHandle != nullptr);
+ writeHandle->SetTarget(mapping, descriptor->size);
+
+ resultData->mapWriteState = BufferMapWriteState::Mapped;
+ resultData->writeHandle.reset(writeHandle);
+
return true;
}
@@ -209,26 +206,12 @@ namespace dawn_wire { namespace server {
static_cast<size_t>(writeFlushInfoLength));
}
- void Server::ForwardBufferMapReadAsync(WGPUBufferMapAsyncStatus status,
- const void* ptr,
- uint64_t dataLength,
- void* userdata) {
- auto data = static_cast<MapUserdata*>(userdata);
- data->server->OnBufferMapReadAsyncCallback(status, ptr, dataLength, data);
- }
-
- void Server::ForwardBufferMapWriteAsync(WGPUBufferMapAsyncStatus status,
- void* ptr,
- uint64_t dataLength,
- void* userdata) {
+ void Server::ForwardBufferMapAsync(WGPUBufferMapAsyncStatus status, void* userdata) {
auto data = static_cast<MapUserdata*>(userdata);
- data->server->OnBufferMapWriteAsyncCallback(status, ptr, dataLength, data);
+ data->server->OnBufferMapAsyncCallback(status, data);
}
- void Server::OnBufferMapReadAsyncCallback(WGPUBufferMapAsyncStatus status,
- const void* ptr,
- uint64_t dataLength,
- MapUserdata* userdata) {
+ void Server::OnBufferMapAsyncCallback(WGPUBufferMapAsyncStatus status, MapUserdata* userdata) {
std::unique_ptr<MapUserdata> data(userdata);
// Skip sending the callback if the buffer has already been destroyed.
@@ -237,59 +220,43 @@ namespace dawn_wire { namespace server {
return;
}
- size_t initialDataInfoLength = 0;
- if (status == WGPUBufferMapAsyncStatus_Success) {
- // Get the serialization size of the message to initialize ReadHandle data.
- initialDataInfoLength = data->readHandle->SerializeInitialDataSize(ptr, dataLength);
- } else {
- dataLength = 0;
- }
+ bool isRead = data->mode & WGPUMapMode_Read;
+ bool isSuccess = status == WGPUBufferMapAsyncStatus_Success;
- ReturnBufferMapReadAsyncCallbackCmd cmd;
+ ReturnBufferMapAsyncCallbackCmd cmd;
cmd.buffer = data->buffer;
cmd.requestSerial = data->requestSerial;
cmd.status = status;
- cmd.initialDataInfoLength = initialDataInfoLength;
- cmd.initialDataInfo = nullptr;
-
- char* readHandleSpace = SerializeCommand(cmd, initialDataInfoLength);
+ cmd.readInitialDataInfoLength = 0;
+ cmd.readInitialDataInfo = nullptr;
- if (status == WGPUBufferMapAsyncStatus_Success) {
- // Serialize the initialization message into the space after the command.
- data->readHandle->SerializeInitialData(ptr, dataLength, readHandleSpace);
-
- // The in-flight map request returned successfully.
- // Move the ReadHandle so it is owned by the buffer.
- bufferData->readHandle = std::move(data->readHandle);
- }
- }
-
- void Server::OnBufferMapWriteAsyncCallback(WGPUBufferMapAsyncStatus status,
- void* ptr,
- uint64_t dataLength,
- MapUserdata* userdata) {
- std::unique_ptr<MapUserdata> data(userdata);
-
- // Skip sending the callback if the buffer has already been destroyed.
- auto* bufferData = BufferObjects().Get(data->buffer.id);
- if (bufferData == nullptr || bufferData->generation != data->buffer.generation) {
- return;
+ const void* readData = nullptr;
+ if (isSuccess && isRead) {
+ // Get the serialization size of the message to initialize ReadHandle data.
+ readData = mProcs.bufferGetConstMappedRange(data->bufferObj, data->offset, data->size);
+ cmd.readInitialDataInfoLength =
+ data->readHandle->SerializeInitialDataSize(readData, data->size);
}
- ReturnBufferMapWriteAsyncCallbackCmd cmd;
- cmd.buffer = data->buffer;
- cmd.requestSerial = data->requestSerial;
- cmd.status = status;
-
- SerializeCommand(cmd);
-
- if (status == WGPUBufferMapAsyncStatus_Success) {
- // The in-flight map request returned successfully.
- // Move the WriteHandle so it is owned by the buffer.
- bufferData->writeHandle = std::move(data->writeHandle);
- bufferData->mapWriteState = BufferMapWriteState::Mapped;
- // Set the target of the WriteHandle to the mapped buffer data.
- bufferData->writeHandle->SetTarget(ptr, dataLength);
+ char* readHandleSpace = SerializeCommand(cmd, cmd.readInitialDataInfoLength);
+
+ if (isSuccess) {
+ if (isRead) {
+ // Serialize the initialization message into the space after the command.
+ data->readHandle->SerializeInitialData(readData, data->size, readHandleSpace);
+ // The in-flight map request returned successfully.
+ // Move the ReadHandle so it is owned by the buffer.
+ bufferData->readHandle = std::move(data->readHandle);
+ } else {
+ // The in-flight map request returned successfully.
+ // Move the WriteHandle so it is owned by the buffer.
+ bufferData->writeHandle = std::move(data->writeHandle);
+ bufferData->mapWriteState = BufferMapWriteState::Mapped;
+ // Set the target of the WriteHandle to the mapped buffer data.
+ bufferData->writeHandle->SetTarget(
+ mProcs.bufferGetMappedRange(data->bufferObj, data->offset, data->size),
+ data->size);
+ }
}
}
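The same exactly-one-of-read-or-write predicate now appears on both sides of the wire: the client injects a validation error, while the server treats a malformed mode as a fatal wire error. A standalone check of what `!(isReadMode ^ isWriteMode)` accepts and rejects:

```cpp
#include <cassert>

int main() {
    constexpr unsigned kRead = 0x1, kWrite = 0x2;  // stand-ins for WGPUMapMode bits
    auto valid = [](unsigned mode) {
        bool isRead = mode & kRead;
        bool isWrite = mode & kWrite;
        return static_cast<bool>(isRead ^ isWrite);
    };
    assert(valid(kRead));
    assert(valid(kWrite));
    assert(!valid(0u));              // server treats this as a fatal wire error
    assert(!valid(kRead | kWrite));  // client injects a validation error instead
    return 0;
}
```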
diff --git a/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp b/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp
index 6e47492d3b0..0c5a6b880f8 100644
--- a/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp
+++ b/chromium/third_party/dawn/src/dawn_wire/server/ServerQueue.cpp
@@ -55,4 +55,21 @@ namespace dawn_wire { namespace server {
return true;
}
+ bool Server::DoQueueWriteTextureInternal(ObjectId queueId,
+ const WGPUTextureCopyView* destination,
+ const uint8_t* data,
+ size_t dataSize,
+ const WGPUTextureDataLayout* dataLayout,
+ const WGPUExtent3D* writeSize) {
+ // The null object isn't valid as `self` so we can combine the check with the
+ // check that the ID is valid.
+ auto* queue = QueueObjects().Get(queueId);
+ if (queue == nullptr) {
+ return false;
+ }
+
+ mProcs.queueWriteTexture(queue->handle, destination, data, dataSize, dataLayout, writeSize);
+ return true;
+ }
+
}} // namespace dawn_wire::server
diff --git a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
index bdefdf37be8..035ad96188e 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/D3D12Backend.h
@@ -18,6 +18,7 @@
#include <dawn/dawn_wsi.h>
#include <dawn_native/DawnNative.h>
+#include <DXGI1_4.h>
#include <windows.h>
#include <wrl/client.h>
@@ -52,6 +53,12 @@ namespace dawn_native { namespace d3d12 {
DAWN_NATIVE_EXPORT WGPUTexture
WrapSharedHandle(WGPUDevice device, const ExternalImageDescriptorDXGISharedHandle* descriptor);
+ struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
+ AdapterDiscoveryOptions(Microsoft::WRL::ComPtr<IDXGIAdapter> adapter);
+
+ Microsoft::WRL::ComPtr<IDXGIAdapter> dxgiAdapter;
+ };
+
}} // namespace dawn_native::d3d12
#endif // DAWNNATIVE_D3D12BACKEND_H_
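The new AdapterDiscoveryOptions lets an embedder point Dawn's D3D12 backend at one specific DXGI adapter. A sketch of the intended use, assuming dawn_native::Instance exposes DiscoverAdapters taking an AdapterDiscoveryOptionsBase pointer (error handling omitted):

```cpp
#include <dawn_native/D3D12Backend.h>
#include <dawn_native/DawnNative.h>
#include <dxgi1_4.h>
#include <wrl/client.h>

void DiscoverSpecificAdapter(dawn_native::Instance* instance) {
    Microsoft::WRL::ComPtr<IDXGIFactory4> factory;
    CreateDXGIFactory1(IID_PPV_ARGS(&factory));

    Microsoft::WRL::ComPtr<IDXGIAdapter1> adapter;
    factory->EnumAdapters1(0, &adapter);  // pick the first adapter for the sketch

    dawn_native::d3d12::AdapterDiscoveryOptions options(adapter);
    instance->DiscoverAdapters(&options);
}
```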
diff --git a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
index 219e53fa94b..a57baeb1193 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/DawnNative.h
@@ -157,6 +157,9 @@ namespace dawn_native {
// Enable debug capture on Dawn startup
void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
+    // Enable GPU-based backend validation on backends that support it.
+ void EnableGPUBasedBackendValidation(bool enableGPUBasedBackendValidation);
+
void SetPlatform(dawn_platform::Platform* platform);
// Returns the underlying WGPUInstance object.
@@ -179,11 +182,13 @@ namespace dawn_native {
DAWN_NATIVE_EXPORT size_t GetDeprecationWarningCountForTesting(WGPUDevice device);
// Query if texture has been initialized
- DAWN_NATIVE_EXPORT bool IsTextureSubresourceInitialized(WGPUTexture texture,
- uint32_t baseMipLevel,
- uint32_t levelCount,
- uint32_t baseArrayLayer,
- uint32_t layerCount);
+ DAWN_NATIVE_EXPORT bool IsTextureSubresourceInitialized(
+ WGPUTexture texture,
+ uint32_t baseMipLevel,
+ uint32_t levelCount,
+ uint32_t baseArrayLayer,
+ uint32_t layerCount,
+ WGPUTextureAspect aspect = WGPUTextureAspect_All);
// Backdoor to get the order of the ProcMap for testing
DAWN_NATIVE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
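
Because the new aspect parameter defaults to WGPUTextureAspect_All, existing call sites compile unchanged, while depth/stencil tests can narrow the query. A sketch, where `texture` is an existing WGPUTexture:

    // Query only the depth aspect of mip 0, array layer 0.
    bool depthInitialized = dawn_native::IsTextureSubresourceInitialized(
        texture, /*baseMipLevel=*/0, /*levelCount=*/1,
        /*baseArrayLayer=*/0, /*layerCount=*/1, WGPUTextureAspect_DepthOnly);
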
diff --git a/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h b/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
index 0965871941c..4e5aee995e8 100644
--- a/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
+++ b/chromium/third_party/dawn/src/include/dawn_native/VulkanBackend.h
@@ -23,6 +23,7 @@
#include <vector>
namespace dawn_native { namespace vulkan {
+
DAWN_NATIVE_EXPORT VkInstance GetInstance(WGPUDevice device);
DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName);
@@ -75,6 +76,7 @@ namespace dawn_native { namespace vulkan {
DAWN_NATIVE_EXPORT WGPUTexture WrapVulkanImage(WGPUDevice cDevice,
const ExternalImageDescriptor* descriptor);
#endif // __linux__
+
}} // namespace dawn_native::vulkan
#endif // DAWNNATIVE_VULKANBACKEND_H_
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h b/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
index 5b5f33c2ace..815b66b877a 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/WireClient.h
@@ -26,7 +26,7 @@ namespace dawn_wire {
namespace client {
class Client;
class MemoryTransferService;
- }
+ } // namespace client
struct ReservedTexture {
WGPUTexture texture;
diff --git a/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h b/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
index b59572180a8..b3d1e62a1ff 100644
--- a/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
+++ b/chromium/third_party/dawn/src/include/dawn_wire/WireServer.h
@@ -26,7 +26,7 @@ namespace dawn_wire {
namespace server {
class Server;
class MemoryTransferService;
- }
+ } // namespace server
struct DAWN_WIRE_EXPORT WireServerDescriptor {
WGPUDevice device;
diff --git a/chromium/third_party/dawn/src/include/webgpu/webgpu.h b/chromium/third_party/dawn/src/include/webgpu/webgpu.h
new file mode 100644
index 00000000000..4a29d373468
--- /dev/null
+++ b/chromium/third_party/dawn/src/include/webgpu/webgpu.h
@@ -0,0 +1 @@
+#include "dawn/webgpu.h"
diff --git a/chromium/third_party/dawn/src/include/webgpu/webgpu_cpp.h b/chromium/third_party/dawn/src/include/webgpu/webgpu_cpp.h
new file mode 100644
index 00000000000..5bbd869b944
--- /dev/null
+++ b/chromium/third_party/dawn/src/include/webgpu/webgpu_cpp.h
@@ -0,0 +1 @@
+#include <dawn/webgpu_cpp.h>
diff --git a/chromium/third_party/dawn/src/tests/BUILD.gn b/chromium/third_party/dawn/src/tests/BUILD.gn
index a1e77415ccc..7817d8ac70c 100644
--- a/chromium/third_party/dawn/src/tests/BUILD.gn
+++ b/chromium/third_party/dawn/src/tests/BUILD.gn
@@ -156,12 +156,14 @@ test("dawn_unittests") {
"unittests/BuddyMemoryAllocatorTests.cpp",
"unittests/CommandAllocatorTests.cpp",
"unittests/EnumClassBitmasksTests.cpp",
+ "unittests/EnumMaskIteratorTests.cpp",
"unittests/ErrorTests.cpp",
"unittests/ExtensionTests.cpp",
"unittests/GetProcAddressTests.cpp",
"unittests/ITypArrayTests.cpp",
"unittests/ITypBitsetTests.cpp",
"unittests/ITypSpanTests.cpp",
+ "unittests/ITypVectorTests.cpp",
"unittests/LinkedListTests.cpp",
"unittests/MathTests.cpp",
"unittests/ObjectBaseTests.cpp",
@@ -173,6 +175,7 @@ test("dawn_unittests") {
"unittests/SerialMapTests.cpp",
"unittests/SerialQueueTests.cpp",
"unittests/SlabAllocatorTests.cpp",
+ "unittests/StackContainerTests.cpp",
"unittests/SystemUtilsTests.cpp",
"unittests/ToBackendTests.cpp",
"unittests/TypedIntegerTests.cpp",
@@ -192,6 +195,7 @@ test("dawn_unittests") {
"unittests/validation/MinimumBufferSizeValidationTests.cpp",
"unittests/validation/QuerySetValidationTests.cpp",
"unittests/validation/QueueSubmitValidationTests.cpp",
+ "unittests/validation/QueueWriteTextureValidationTests.cpp",
"unittests/validation/RenderBundleValidationTests.cpp",
"unittests/validation/RenderPassDescriptorValidationTests.cpp",
"unittests/validation/RenderPipelineValidationTests.cpp",
@@ -259,6 +263,7 @@ source_set("dawn_end2end_tests_sources") {
"end2end/BasicTests.cpp",
"end2end/BindGroupTests.cpp",
"end2end/BufferTests.cpp",
+ "end2end/BufferZeroInitTests.cpp",
"end2end/ClipSpaceTests.cpp",
"end2end/ColorStateTests.cpp",
"end2end/CompressedTextureFormatTests.cpp",
@@ -271,6 +276,7 @@ source_set("dawn_end2end_tests_sources") {
"end2end/DebugMarkerTests.cpp",
"end2end/DeprecatedAPITests.cpp",
"end2end/DepthSamplingTests.cpp",
+ "end2end/DepthStencilCopyTests.cpp",
"end2end/DepthStencilStateTests.cpp",
"end2end/DestroyTests.cpp",
"end2end/DeviceLostTests.cpp",
@@ -290,6 +296,7 @@ source_set("dawn_end2end_tests_sources") {
"end2end/OpArrayLengthTests.cpp",
"end2end/PipelineLayoutTests.cpp",
"end2end/PrimitiveTopologyTests.cpp",
+ "end2end/QueryTests.cpp",
"end2end/QueueTests.cpp",
"end2end/RenderBundleTests.cpp",
"end2end/RenderPassLoadOpTests.cpp",
@@ -327,7 +334,11 @@ source_set("dawn_end2end_tests_sources") {
if (dawn_enable_metal) {
sources += [ "end2end/IOSurfaceWrappingTests.cpp" ]
- libs += [ "IOSurface.framework" ]
+ frameworks = [ "IOSurface.framework" ]
+ }
+
+ if (dawn_enable_wgsl) {
+ sources += [ "end2end/VertexBufferRobustnessTests.cpp" ]
}
if (dawn_enable_opengl) {
@@ -470,16 +481,17 @@ test("dawn_perf_tests") {
libs = []
- # When building inside Chromium, use their gtest main function because it is
- # needed to run in swarming correctly.
+ # When building inside Chromium, use their gtest main function and the
+ # other perf test scaffolding in order to run in swarming correctly.
if (build_with_chromium) {
deps += [ ":dawn_perf_tests_main" ]
+ data_deps = [ "//testing:run_perf_test" ]
} else {
sources += [ "PerfTestsMain.cpp" ]
}
if (dawn_enable_metal) {
- libs += [ "IOSurface.framework" ]
+ frameworks = [ "IOSurface.framework" ]
}
if (dawn_enable_opengl) {
diff --git a/chromium/third_party/dawn/src/utils/BUILD.gn b/chromium/third_party/dawn/src/utils/BUILD.gn
index 9d5d69c7541..38fd58797be 100644
--- a/chromium/third_party/dawn/src/utils/BUILD.gn
+++ b/chromium/third_party/dawn/src/utils/BUILD.gn
@@ -25,9 +25,7 @@ import("${dawn_root}/scripts/dawn_features.gni")
# use the real library from third_party/.
if (dawn_supports_glfw_for_windowing) {
group("dawn_glfw") {
- public_deps = [
- "${dawn_root}/third_party/gn/glfw",
- ]
+ public_deps = [ "${dawn_root}/third_party/gn/glfw" ]
}
} else if (is_fuchsia) {
# The mock implementation of GLFW on Fuchsia
@@ -52,9 +50,7 @@ if (dawn_supports_glfw_for_windowing) {
"Glfw3Fuchsia.cpp",
]
public_configs = [ ":dawn_glfw_public_config" ]
- deps = [
- "${dawn_root}/src/common",
- ]
+ deps = [ "${dawn_root}/src/common" ]
}
} else {
# Just skip GLFW on other systems
@@ -74,10 +70,13 @@ static_library("dawn_utils") {
"ComboRenderBundleEncoderDescriptor.h",
"ComboRenderPipelineDescriptor.cpp",
"ComboRenderPipelineDescriptor.h",
+ "PlatformDebugLogger.h",
"SystemUtils.cpp",
"SystemUtils.h",
"TerribleCommandBuffer.cpp",
"TerribleCommandBuffer.h",
+ "TestUtils.cpp",
+ "TestUtils.h",
"TextureFormatUtils.cpp",
"TextureFormatUtils.h",
"Timer.h",
@@ -91,6 +90,13 @@ static_library("dawn_utils") {
"${dawn_shaderc_dir}:libshaderc",
]
libs = []
+ frameworks = []
+
+ if (is_win) {
+ sources += [ "WindowsDebugLogger.cpp" ]
+ } else {
+ sources += [ "EmptyDebugLogger.cpp" ]
+ }
if (is_win) {
sources += [ "WindowsTimer.cpp" ]
@@ -100,7 +106,7 @@ static_library("dawn_utils") {
"ObjCUtils.h",
"ObjCUtils.mm",
]
- libs += [ "QuartzCore.framework" ]
+ frameworks += [ "QuartzCore.framework" ]
} else {
sources += [ "PosixTimer.cpp" ]
}
@@ -114,13 +120,11 @@ static_library("dawn_utils") {
if (dawn_enable_metal) {
sources += [ "GLFWUtils_metal.mm" ]
- libs += [ "Metal.framework" ]
+ frameworks += [ "Metal.framework" ]
}
}
- public_deps = [
- "${dawn_root}/src/dawn:dawncpp_headers",
- ]
+ public_deps = [ "${dawn_root}/src/dawn:dawncpp_headers" ]
}
###############################################################################
@@ -137,9 +141,7 @@ if (dawn_standalone) {
"BackendBinding.h",
]
- public_deps = [
- "${dawn_root}/src/dawn:dawn_headers",
- ]
+ public_deps = [ "${dawn_root}/src/dawn:dawn_headers" ]
deps = [
":dawn_glfw",
@@ -147,6 +149,7 @@ if (dawn_standalone) {
"${dawn_root}/src/dawn_native",
]
libs = []
+ frameworks = []
if (dawn_enable_d3d12) {
sources += [ "D3D12Binding.cpp" ]
@@ -154,7 +157,7 @@ if (dawn_standalone) {
if (dawn_enable_metal) {
sources += [ "MetalBinding.mm" ]
- libs += [
+ frameworks += [
"Metal.framework",
"QuartzCore.framework",
]
diff --git a/chromium/third_party/dawn/src/utils/CMakeLists.txt b/chromium/third_party/dawn/src/utils/CMakeLists.txt
index 3a959401a2b..e215f516377 100644
--- a/chromium/third_party/dawn/src/utils/CMakeLists.txt
+++ b/chromium/third_party/dawn/src/utils/CMakeLists.txt
@@ -22,10 +22,13 @@ target_sources(dawn_utils PRIVATE
"ComboRenderPipelineDescriptor.h"
"GLFWUtils.cpp"
"GLFWUtils.h"
+ "PlatformDebugLogger.h"
"SystemUtils.cpp"
"SystemUtils.h"
"TerribleCommandBuffer.cpp"
"TerribleCommandBuffer.h"
+ "TestUtils.cpp"
+ "TestUtils.h"
"TextureFormatUtils.cpp"
"TextureFormatUtils.h"
"Timer.h"
@@ -43,6 +46,12 @@ target_link_libraries(dawn_utils
)
if(WIN32)
+ target_sources(dawn_utils PRIVATE "WindowsDebugLogger.cpp")
+else()
+ target_sources(dawn_utils PRIVATE "EmptyDebugLogger.cpp")
+endif()
+
+if(WIN32)
target_sources(dawn_utils PRIVATE "WindowsTimer.cpp")
elseif(APPLE)
target_sources(dawn_utils PRIVATE
diff --git a/chromium/third_party/dawn/src/utils/EmptyDebugLogger.cpp b/chromium/third_party/dawn/src/utils/EmptyDebugLogger.cpp
new file mode 100644
index 00000000000..ed0ad7ff532
--- /dev/null
+++ b/chromium/third_party/dawn/src/utils/EmptyDebugLogger.cpp
@@ -0,0 +1,29 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "utils/PlatformDebugLogger.h"
+
+namespace utils {
+
+ class EmptyDebugLogger : public PlatformDebugLogger {
+ public:
+ EmptyDebugLogger() = default;
+ ~EmptyDebugLogger() override = default;
+ };
+
+ PlatformDebugLogger* CreatePlatformDebugLogger() {
+ return new EmptyDebugLogger();
+ }
+
+} // namespace utils
diff --git a/chromium/third_party/dawn/src/utils/PlatformDebugLogger.h b/chromium/third_party/dawn/src/utils/PlatformDebugLogger.h
new file mode 100644
index 00000000000..33c46dec44b
--- /dev/null
+++ b/chromium/third_party/dawn/src/utils/PlatformDebugLogger.h
@@ -0,0 +1,29 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_PLATFORMDEBUGLOGGER_H_
+#define UTILS_PLATFORMDEBUGLOGGER_H_
+
+namespace utils {
+
+ class PlatformDebugLogger {
+ public:
+ virtual ~PlatformDebugLogger() = default;
+ };
+
+ PlatformDebugLogger* CreatePlatformDebugLogger();
+
+} // namespace utils
+
+#endif // UTILS_PLATFORMDEBUGLOGGER_H_
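
The header exposes only a factory and a virtual destructor; the platform files below supply the concrete type. A sketch of the intended lifetime in a test harness (the harness itself is assumed):

    #include "utils/PlatformDebugLogger.h"

    int main() {
        // Create early so platform debug output is captured for the whole
        // run; on non-Windows platforms this is the no-op EmptyDebugLogger.
        utils::PlatformDebugLogger* debugLogger = utils::CreatePlatformDebugLogger();

        int result = 0;  // ...run the real test suite here...

        delete debugLogger;  // joins the Windows listener thread, if any
        return result;
    }
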
diff --git a/chromium/third_party/dawn/src/utils/SystemUtils.h b/chromium/third_party/dawn/src/utils/SystemUtils.h
index 828eb58aabf..1f42cc539a6 100644
--- a/chromium/third_party/dawn/src/utils/SystemUtils.h
+++ b/chromium/third_party/dawn/src/utils/SystemUtils.h
@@ -20,4 +20,4 @@ namespace utils {
void USleep(unsigned int usecs);
}
-#endif // UTILS_SYSTEMUTILS_H_
\ No newline at end of file
+#endif // UTILS_SYSTEMUTILS_H_
diff --git a/chromium/third_party/dawn/src/utils/TestUtils.cpp b/chromium/third_party/dawn/src/utils/TestUtils.cpp
new file mode 100644
index 00000000000..cdbe3b3fc85
--- /dev/null
+++ b/chromium/third_party/dawn/src/utils/TestUtils.cpp
@@ -0,0 +1,107 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "utils/TestUtils.h"
+
+#include "common/Assert.h"
+#include "common/Constants.h"
+#include "common/Math.h"
+#include "utils/TextureFormatUtils.h"
+#include "utils/WGPUHelpers.h"
+
+#include <vector>
+
+namespace utils {
+
+ uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width) {
+ const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
+ return Align(bytesPerTexel * width, kTextureBytesPerRowAlignment);
+ }
+
+ uint32_t GetBytesInBufferTextureCopy(wgpu::TextureFormat format,
+ uint32_t width,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ uint32_t copyArrayLayerCount) {
+ ASSERT(rowsPerImage > 0);
+ const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
+ const uint32_t bytesAtLastImage = bytesPerRow * (rowsPerImage - 1) + bytesPerTexel * width;
+ return bytesPerRow * rowsPerImage * (copyArrayLayerCount - 1) + bytesAtLastImage;
+ }
+
+ // TODO(jiawei.shao@intel.com): support compressed texture formats
+ TextureDataCopyLayout GetTextureDataCopyLayoutForTexture2DAtLevel(
+ wgpu::TextureFormat format,
+ wgpu::Extent3D textureSizeAtLevel0,
+ uint32_t mipmapLevel,
+ uint32_t rowsPerImage) {
+ TextureDataCopyLayout layout;
+
+ layout.mipSize = {textureSizeAtLevel0.width >> mipmapLevel,
+ textureSizeAtLevel0.height >> mipmapLevel, textureSizeAtLevel0.depth};
+
+ layout.bytesPerRow = GetMinimumBytesPerRow(format, layout.mipSize.width);
+
+ uint32_t appliedRowsPerImage = rowsPerImage > 0 ? rowsPerImage : layout.mipSize.height;
+ layout.bytesPerImage = layout.bytesPerRow * appliedRowsPerImage;
+
+ layout.byteLength =
+ GetBytesInBufferTextureCopy(format, layout.mipSize.width, layout.bytesPerRow,
+ appliedRowsPerImage, textureSizeAtLevel0.depth);
+
+ const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
+ layout.texelBlocksPerRow = layout.bytesPerRow / bytesPerTexel;
+ layout.texelBlocksPerImage = layout.bytesPerImage / bytesPerTexel;
+ layout.texelBlockCount = layout.byteLength / bytesPerTexel;
+
+ return layout;
+ }
+
+ uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+ uint64_t rowsPerImage,
+ wgpu::Extent3D copyExtent,
+ wgpu::TextureFormat textureFormat) {
+ if (copyExtent.width == 0 || copyExtent.height == 0 || copyExtent.depth == 0) {
+ return 0;
+ } else {
+ uint32_t blockSize = utils::GetTexelBlockSizeInBytes(textureFormat);
+ uint32_t blockWidth = utils::GetTextureFormatBlockWidth(textureFormat);
+ uint32_t blockHeight = utils::GetTextureFormatBlockHeight(textureFormat);
+
+ uint64_t texelBlockRowsPerImage = rowsPerImage / blockHeight;
+ uint64_t bytesPerImage = bytesPerRow * texelBlockRowsPerImage;
+ uint64_t bytesInLastSlice = bytesPerRow * (copyExtent.height / blockHeight - 1) +
+ (copyExtent.width / blockWidth * blockSize);
+ return bytesPerImage * (copyExtent.depth - 1) + bytesInLastSlice;
+ }
+ }
+
+ void UnalignDynamicUploader(wgpu::Device device) {
+ std::vector<uint8_t> data = {1};
+
+ wgpu::TextureDescriptor descriptor = {};
+ descriptor.size = {1, 1, 1};
+ descriptor.format = wgpu::TextureFormat::R8Unorm;
+ descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+ wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+ wgpu::TextureCopyView textureCopyView = utils::CreateTextureCopyView(texture, 0, {0, 0, 0});
+ wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 0, 0);
+ wgpu::Extent3D copyExtent = {1, 1, 1};
+
+ // WriteTexture with exactly 1 byte of data.
+ device.GetDefaultQueue().WriteTexture(&textureCopyView, data.data(), 1, &textureDataLayout,
+ &copyExtent);
+ }
+} // namespace utils
\ No newline at end of file
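
A worked example of the size math implemented above, for a 7x5 RGBA8Unorm copy spanning 3 array layers: every row but the last is padded to the 256-byte kTextureBytesPerRowAlignment, while the final row of the final layer only needs its tight 7 * 4 = 28 bytes.

    uint32_t bytesPerRow = utils::GetMinimumBytesPerRow(
        wgpu::TextureFormat::RGBA8Unorm, /*width=*/7);  // Align(28, 256) == 256
    uint32_t totalBytes = utils::GetBytesInBufferTextureCopy(
        wgpu::TextureFormat::RGBA8Unorm, /*width=*/7, bytesPerRow,
        /*rowsPerImage=*/5, /*copyArrayLayerCount=*/3);
    // totalBytes == 256 * 5 * 2 + (256 * 4 + 28) == 2560 + 1052 == 3612
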
diff --git a/chromium/third_party/dawn/src/utils/TestUtils.h b/chromium/third_party/dawn/src/utils/TestUtils.h
new file mode 100644
index 00000000000..d1ba25fa410
--- /dev/null
+++ b/chromium/third_party/dawn/src/utils/TestUtils.h
@@ -0,0 +1,57 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef UTILS_TESTHELPERS_H_
+#define UTILS_TESTHELPERS_H_
+
+#include <dawn/webgpu_cpp.h>
+
+namespace utils {
+
+ struct TextureDataCopyLayout {
+ uint64_t byteLength;
+ uint64_t texelBlockCount;
+ uint32_t bytesPerRow;
+ uint32_t texelBlocksPerRow;
+ uint32_t bytesPerImage;
+ uint32_t texelBlocksPerImage;
+ wgpu::Extent3D mipSize;
+ };
+
+ uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width);
+ uint32_t GetBytesInBufferTextureCopy(wgpu::TextureFormat format,
+ uint32_t width,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage,
+ uint32_t copyArrayLayerCount);
+ TextureDataCopyLayout GetTextureDataCopyLayoutForTexture2DAtLevel(
+ wgpu::TextureFormat format,
+ wgpu::Extent3D textureSizeAtLevel0,
+ uint32_t mipmapLevel,
+ uint32_t rowsPerImage);
+
+ uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+ uint64_t rowsPerImage,
+ wgpu::Extent3D copyExtent,
+ wgpu::TextureFormat textureFormat);
+
+ // A helper function used for testing DynamicUploader offset alignment.
+ // Calling it performs a Queue::WriteTexture with exactly 1 byte of data, so
+ // that, assuming WriteTexture uses the DynamicUploader, its first RingBuffer
+ // will then contain 1 byte, leaving the next upload offset unaligned.
+ void UnalignDynamicUploader(wgpu::Device device);
+
+} // namespace utils
+
+#endif // UTILS_TESTHELPERS_H_
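
For example, the staging layout for mip level 2 of a 16x16 RGBA8Unorm texture, using the declarations above (passing rowsPerImage == 0 falls back to the mip height, per the implementation in TestUtils.cpp):

    utils::TextureDataCopyLayout layout =
        utils::GetTextureDataCopyLayoutForTexture2DAtLevel(
            wgpu::TextureFormat::RGBA8Unorm, {16, 16, 1},
            /*mipmapLevel=*/2, /*rowsPerImage=*/0);
    // layout.mipSize     == {4, 4, 1}
    // layout.bytesPerRow == 256 (16 tight bytes, padded to the row alignment)
    // layout.byteLength  == 256 * 3 + 16 == 784 (the last row stays tight)
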
diff --git a/chromium/third_party/dawn/src/utils/TextureFormatUtils.cpp b/chromium/third_party/dawn/src/utils/TextureFormatUtils.cpp
index 795d1f948ac..89cd7d0ee2f 100644
--- a/chromium/third_party/dawn/src/utils/TextureFormatUtils.cpp
+++ b/chromium/third_party/dawn/src/utils/TextureFormatUtils.cpp
@@ -27,7 +27,8 @@ namespace utils {
case wgpu::TextureFormat::RGBA8Unorm:
case wgpu::TextureFormat::RGBA8Snorm:
case wgpu::TextureFormat::RGB10A2Unorm:
- case wgpu::TextureFormat::RG11B10Float:
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ case wgpu::TextureFormat::RGB9E5Ufloat:
case wgpu::TextureFormat::RG32Float:
case wgpu::TextureFormat::RGBA16Float:
case wgpu::TextureFormat::RGBA32Float:
@@ -118,7 +119,9 @@ namespace utils {
case wgpu::TextureFormat::BGRA8Unorm:
case wgpu::TextureFormat::BGRA8UnormSrgb:
case wgpu::TextureFormat::RGB10A2Unorm:
- case wgpu::TextureFormat::RG11B10Float:
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ case wgpu::TextureFormat::Depth32Float:
return 4u;
case wgpu::TextureFormat::RG32Float:
@@ -152,9 +155,140 @@ namespace utils {
case wgpu::TextureFormat::BC7RGBAUnormSrgb:
return 16u;
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ case wgpu::TextureFormat::Undefined:
+ default:
+ UNREACHABLE();
+ return 0u;
+ }
+ }
+
+ uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
+ case wgpu::TextureFormat::Depth32Float:
+ case wgpu::TextureFormat::Depth24Plus:
+ case wgpu::TextureFormat::Depth24PlusStencil8:
+ return 1u;
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBSfloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ return 4u;
+
+ case wgpu::TextureFormat::Undefined:
+ default:
+ UNREACHABLE();
+ return 0u;
+ }
+ }
+
+ uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat) {
+ switch (textureFormat) {
+ case wgpu::TextureFormat::R8Unorm:
+ case wgpu::TextureFormat::R8Snorm:
+ case wgpu::TextureFormat::R8Uint:
+ case wgpu::TextureFormat::R8Sint:
+ case wgpu::TextureFormat::R16Uint:
+ case wgpu::TextureFormat::R16Sint:
+ case wgpu::TextureFormat::R16Float:
+ case wgpu::TextureFormat::RG8Unorm:
+ case wgpu::TextureFormat::RG8Snorm:
+ case wgpu::TextureFormat::RG8Uint:
+ case wgpu::TextureFormat::RG8Sint:
+ case wgpu::TextureFormat::R32Float:
+ case wgpu::TextureFormat::R32Uint:
+ case wgpu::TextureFormat::R32Sint:
+ case wgpu::TextureFormat::RG16Uint:
+ case wgpu::TextureFormat::RG16Sint:
+ case wgpu::TextureFormat::RG16Float:
+ case wgpu::TextureFormat::RGBA8Unorm:
+ case wgpu::TextureFormat::RGBA8UnormSrgb:
+ case wgpu::TextureFormat::RGBA8Snorm:
+ case wgpu::TextureFormat::RGBA8Uint:
+ case wgpu::TextureFormat::RGBA8Sint:
+ case wgpu::TextureFormat::BGRA8Unorm:
+ case wgpu::TextureFormat::BGRA8UnormSrgb:
+ case wgpu::TextureFormat::RGB10A2Unorm:
+ case wgpu::TextureFormat::RG11B10Ufloat:
+ case wgpu::TextureFormat::RGB9E5Ufloat:
+ case wgpu::TextureFormat::RG32Float:
+ case wgpu::TextureFormat::RG32Uint:
+ case wgpu::TextureFormat::RG32Sint:
+ case wgpu::TextureFormat::RGBA16Uint:
+ case wgpu::TextureFormat::RGBA16Sint:
+ case wgpu::TextureFormat::RGBA16Float:
+ case wgpu::TextureFormat::RGBA32Float:
+ case wgpu::TextureFormat::RGBA32Uint:
+ case wgpu::TextureFormat::RGBA32Sint:
case wgpu::TextureFormat::Depth32Float:
case wgpu::TextureFormat::Depth24Plus:
case wgpu::TextureFormat::Depth24PlusStencil8:
+ return 1u;
+
+ case wgpu::TextureFormat::BC1RGBAUnorm:
+ case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC4RUnorm:
+ case wgpu::TextureFormat::BC4RSnorm:
+ case wgpu::TextureFormat::BC2RGBAUnorm:
+ case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC3RGBAUnorm:
+ case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+ case wgpu::TextureFormat::BC5RGUnorm:
+ case wgpu::TextureFormat::BC5RGSnorm:
+ case wgpu::TextureFormat::BC6HRGBUfloat:
+ case wgpu::TextureFormat::BC6HRGBSfloat:
+ case wgpu::TextureFormat::BC7RGBAUnorm:
+ case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+ return 4u;
+
case wgpu::TextureFormat::Undefined:
default:
UNREACHABLE();
@@ -208,7 +342,7 @@ namespace utils {
return "rgba8i";
case wgpu::TextureFormat::RGB10A2Unorm:
return "rgb10_a2";
- case wgpu::TextureFormat::RG11B10Float:
+ case wgpu::TextureFormat::RG11B10Ufloat:
return "r11f_g11f_b10f";
case wgpu::TextureFormat::RG32Float:
return "rg32f";
@@ -229,6 +363,7 @@ namespace utils {
case wgpu::TextureFormat::RGBA32Sint:
return "rgba32i";
+ case wgpu::TextureFormat::RGB9E5Ufloat:
case wgpu::TextureFormat::RGBA8UnormSrgb:
case wgpu::TextureFormat::BGRA8Unorm:
case wgpu::TextureFormat::BGRA8UnormSrgb:
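
With block width and height now exposed, buffer sizes for compressed copies can be computed per block rather than per texel. For instance, BC1 packs each 4x4 texel block into 8 bytes (a sketch; the block values follow the BC format spec):

    wgpu::TextureFormat format = wgpu::TextureFormat::BC1RGBAUnorm;
    uint32_t blockSize  = utils::GetTexelBlockSizeInBytes(format);    // 8 bytes
    uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);  // 4 texels
    uint32_t widthInBlocks = 64 / blockWidth;                         // 16 blocks
    uint32_t tightBytesPerRow = widthInBlocks * blockSize;            // 128 bytes
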
diff --git a/chromium/third_party/dawn/src/utils/TextureFormatUtils.h b/chromium/third_party/dawn/src/utils/TextureFormatUtils.h
index 2976e94d3fa..7a7fa429bd6 100644
--- a/chromium/third_party/dawn/src/utils/TextureFormatUtils.h
+++ b/chromium/third_party/dawn/src/utils/TextureFormatUtils.h
@@ -22,39 +22,77 @@
#include "common/Assert.h"
namespace utils {
- static constexpr std::array<wgpu::TextureFormat, 52> kAllTextureFormats = {
- wgpu::TextureFormat::R8Unorm, wgpu::TextureFormat::R8Snorm,
- wgpu::TextureFormat::R8Uint, wgpu::TextureFormat::R8Sint,
- wgpu::TextureFormat::R16Uint, wgpu::TextureFormat::R16Sint,
- wgpu::TextureFormat::R16Float, wgpu::TextureFormat::RG8Unorm,
- wgpu::TextureFormat::RG8Snorm, wgpu::TextureFormat::RG8Uint,
- wgpu::TextureFormat::RG8Sint, wgpu::TextureFormat::R32Float,
- wgpu::TextureFormat::R32Uint, wgpu::TextureFormat::R32Sint,
- wgpu::TextureFormat::RG16Uint, wgpu::TextureFormat::RG16Sint,
- wgpu::TextureFormat::RG16Float, wgpu::TextureFormat::RGBA8Unorm,
- wgpu::TextureFormat::RGBA8UnormSrgb, wgpu::TextureFormat::RGBA8Snorm,
- wgpu::TextureFormat::RGBA8Uint, wgpu::TextureFormat::RGBA8Sint,
- wgpu::TextureFormat::BGRA8Unorm, wgpu::TextureFormat::BGRA8UnormSrgb,
- wgpu::TextureFormat::RGB10A2Unorm, wgpu::TextureFormat::RG11B10Float,
- wgpu::TextureFormat::RG32Float, wgpu::TextureFormat::RG32Uint,
- wgpu::TextureFormat::RG32Sint, wgpu::TextureFormat::RGBA16Uint,
- wgpu::TextureFormat::RGBA16Sint, wgpu::TextureFormat::RGBA16Float,
- wgpu::TextureFormat::RGBA32Float, wgpu::TextureFormat::RGBA32Uint,
- wgpu::TextureFormat::RGBA32Sint, wgpu::TextureFormat::Depth32Float,
- wgpu::TextureFormat::Depth24Plus, wgpu::TextureFormat::Depth24PlusStencil8,
- wgpu::TextureFormat::BC1RGBAUnorm, wgpu::TextureFormat::BC1RGBAUnormSrgb,
- wgpu::TextureFormat::BC2RGBAUnorm, wgpu::TextureFormat::BC2RGBAUnormSrgb,
- wgpu::TextureFormat::BC3RGBAUnorm, wgpu::TextureFormat::BC3RGBAUnormSrgb,
- wgpu::TextureFormat::BC4RUnorm, wgpu::TextureFormat::BC4RSnorm,
- wgpu::TextureFormat::BC5RGUnorm, wgpu::TextureFormat::BC5RGSnorm,
- wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBSfloat,
- wgpu::TextureFormat::BC7RGBAUnorm, wgpu::TextureFormat::BC7RGBAUnormSrgb,
+ static constexpr std::array<wgpu::TextureFormat, 53> kAllTextureFormats = {
+ wgpu::TextureFormat::R8Unorm,
+ wgpu::TextureFormat::R8Snorm,
+ wgpu::TextureFormat::R8Uint,
+ wgpu::TextureFormat::R8Sint,
+ wgpu::TextureFormat::R16Uint,
+ wgpu::TextureFormat::R16Sint,
+ wgpu::TextureFormat::R16Float,
+ wgpu::TextureFormat::RG8Unorm,
+ wgpu::TextureFormat::RG8Snorm,
+ wgpu::TextureFormat::RG8Uint,
+ wgpu::TextureFormat::RG8Sint,
+ wgpu::TextureFormat::R32Float,
+ wgpu::TextureFormat::R32Uint,
+ wgpu::TextureFormat::R32Sint,
+ wgpu::TextureFormat::RG16Uint,
+ wgpu::TextureFormat::RG16Sint,
+ wgpu::TextureFormat::RG16Float,
+ wgpu::TextureFormat::RGBA8Unorm,
+ wgpu::TextureFormat::RGBA8UnormSrgb,
+ wgpu::TextureFormat::RGBA8Snorm,
+ wgpu::TextureFormat::RGBA8Uint,
+ wgpu::TextureFormat::RGBA8Sint,
+ wgpu::TextureFormat::BGRA8Unorm,
+ wgpu::TextureFormat::BGRA8UnormSrgb,
+ wgpu::TextureFormat::RGB10A2Unorm,
+ wgpu::TextureFormat::RG11B10Ufloat,
+ wgpu::TextureFormat::RGB9E5Ufloat,
+ wgpu::TextureFormat::RG32Float,
+ wgpu::TextureFormat::RG32Uint,
+ wgpu::TextureFormat::RG32Sint,
+ wgpu::TextureFormat::RGBA16Uint,
+ wgpu::TextureFormat::RGBA16Sint,
+ wgpu::TextureFormat::RGBA16Float,
+ wgpu::TextureFormat::RGBA32Float,
+ wgpu::TextureFormat::RGBA32Uint,
+ wgpu::TextureFormat::RGBA32Sint,
+ wgpu::TextureFormat::Depth32Float,
+ wgpu::TextureFormat::Depth24Plus,
+ wgpu::TextureFormat::Depth24PlusStencil8,
+ wgpu::TextureFormat::BC1RGBAUnorm,
+ wgpu::TextureFormat::BC1RGBAUnormSrgb,
+ wgpu::TextureFormat::BC2RGBAUnorm,
+ wgpu::TextureFormat::BC2RGBAUnormSrgb,
+ wgpu::TextureFormat::BC3RGBAUnorm,
+ wgpu::TextureFormat::BC3RGBAUnormSrgb,
+ wgpu::TextureFormat::BC4RUnorm,
+ wgpu::TextureFormat::BC4RSnorm,
+ wgpu::TextureFormat::BC5RGUnorm,
+ wgpu::TextureFormat::BC5RGSnorm,
+ wgpu::TextureFormat::BC6HRGBUfloat,
+ wgpu::TextureFormat::BC6HRGBSfloat,
+ wgpu::TextureFormat::BC7RGBAUnorm,
+ wgpu::TextureFormat::BC7RGBAUnormSrgb,
};
+ static constexpr std::array<wgpu::TextureFormat, 14> kBCFormats = {
+ wgpu::TextureFormat::BC1RGBAUnorm, wgpu::TextureFormat::BC1RGBAUnormSrgb,
+ wgpu::TextureFormat::BC2RGBAUnorm, wgpu::TextureFormat::BC2RGBAUnormSrgb,
+ wgpu::TextureFormat::BC3RGBAUnorm, wgpu::TextureFormat::BC3RGBAUnormSrgb,
+ wgpu::TextureFormat::BC4RUnorm, wgpu::TextureFormat::BC4RSnorm,
+ wgpu::TextureFormat::BC5RGUnorm, wgpu::TextureFormat::BC5RGSnorm,
+ wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBSfloat,
+ wgpu::TextureFormat::BC7RGBAUnorm, wgpu::TextureFormat::BC7RGBAUnormSrgb};
+
const char* GetColorTextureComponentTypePrefix(wgpu::TextureFormat textureFormat);
bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format);
uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat);
+ uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat);
+ uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat);
const char* GetGLSLImageFormatQualifier(wgpu::TextureFormat textureFormat);
} // namespace utils
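
The new kBCFormats array makes it easy to parameterize tests over every compressed format; a sketch using the block helpers declared above:

    #include "common/Assert.h"

    void CheckBCBlockDimensions() {
        for (wgpu::TextureFormat format : utils::kBCFormats) {
            // All BC formats compress 4x4 texel blocks.
            ASSERT(utils::GetTextureFormatBlockWidth(format) == 4u);
            ASSERT(utils::GetTextureFormatBlockHeight(format) == 4u);
        }
    }
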
diff --git a/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp b/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
index 686b223f35a..4e2f6e87117 100644
--- a/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
+++ b/chromium/third_party/dawn/src/utils/WGPUHelpers.cpp
@@ -14,11 +14,8 @@
#include "utils/WGPUHelpers.h"
-#include "common/Assert.h"
#include "common/Constants.h"
#include "common/Log.h"
-#include "common/Math.h"
-#include "utils/TextureFormatUtils.h"
#include <shaderc/shaderc.hpp>
@@ -144,6 +141,14 @@ namespace utils {
return CreateShaderModuleFromResult(device, result);
}
+ wgpu::ShaderModule CreateShaderModuleFromWGSL(const wgpu::Device& device, const char* source) {
+ wgpu::ShaderModuleWGSLDescriptor wgslDesc;
+ wgslDesc.source = source;
+ wgpu::ShaderModuleDescriptor descriptor;
+ descriptor.nextInChain = &wgslDesc;
+ return device.CreateShaderModule(&descriptor);
+ }
+
std::vector<uint32_t> CompileGLSLToSpirv(SingleShaderStage stage, const char* source) {
shaderc_shader_kind kind = ShadercShaderKind(stage);
@@ -267,26 +272,37 @@ namespace utils {
uint64_t offset,
uint32_t bytesPerRow,
uint32_t rowsPerImage) {
- wgpu::BufferCopyView bufferCopyView;
+ wgpu::BufferCopyView bufferCopyView = {};
bufferCopyView.buffer = buffer;
- bufferCopyView.offset = offset;
- bufferCopyView.bytesPerRow = bytesPerRow;
- bufferCopyView.rowsPerImage = rowsPerImage;
+ bufferCopyView.layout = CreateTextureDataLayout(offset, bytesPerRow, rowsPerImage);
return bufferCopyView;
}
wgpu::TextureCopyView CreateTextureCopyView(wgpu::Texture texture,
uint32_t mipLevel,
- wgpu::Origin3D origin) {
+ wgpu::Origin3D origin,
+ wgpu::TextureAspect aspect) {
wgpu::TextureCopyView textureCopyView;
textureCopyView.texture = texture;
textureCopyView.mipLevel = mipLevel;
textureCopyView.origin = origin;
+ textureCopyView.aspect = aspect;
return textureCopyView;
}
+ wgpu::TextureDataLayout CreateTextureDataLayout(uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage) {
+ wgpu::TextureDataLayout textureDataLayout;
+ textureDataLayout.offset = offset;
+ textureDataLayout.bytesPerRow = bytesPerRow;
+ textureDataLayout.rowsPerImage = rowsPerImage;
+
+ return textureDataLayout;
+ }
+
wgpu::SamplerDescriptor GetDefaultSamplerDescriptor() {
wgpu::SamplerDescriptor desc = {};
@@ -374,47 +390,4 @@ namespace utils {
return device.CreateBindGroup(&descriptor);
}
- uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width) {
- const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
- return Align(bytesPerTexel * width, kTextureBytesPerRowAlignment);
- }
-
- uint32_t GetBytesInBufferTextureCopy(wgpu::TextureFormat format,
- uint32_t width,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage,
- uint32_t copyArrayLayerCount) {
- ASSERT(rowsPerImage > 0);
- const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
- const uint32_t bytesAtLastImage = bytesPerRow * (rowsPerImage - 1) + bytesPerTexel * width;
- return bytesPerRow * rowsPerImage * (copyArrayLayerCount - 1) + bytesAtLastImage;
- }
-
- // TODO(jiawei.shao@intel.com): support compressed texture formats
- BufferTextureCopyLayout GetBufferTextureCopyLayoutForTexture2DAtLevel(
- wgpu::TextureFormat format,
- wgpu::Extent3D textureSizeAtLevel0,
- uint32_t mipmapLevel,
- uint32_t rowsPerImage) {
- BufferTextureCopyLayout layout;
-
- layout.mipSize = {textureSizeAtLevel0.width >> mipmapLevel,
- textureSizeAtLevel0.height >> mipmapLevel, textureSizeAtLevel0.depth};
-
- layout.bytesPerRow = GetMinimumBytesPerRow(format, layout.mipSize.width);
-
- uint32_t appliedRowsPerImage = rowsPerImage > 0 ? rowsPerImage : layout.mipSize.height;
- layout.bytesPerImage = layout.bytesPerRow * appliedRowsPerImage;
-
- layout.byteLength =
- GetBytesInBufferTextureCopy(format, layout.mipSize.width, layout.bytesPerRow,
- appliedRowsPerImage, textureSizeAtLevel0.depth);
-
- const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
- layout.texelBlocksPerRow = layout.bytesPerRow / bytesPerTexel;
- layout.texelBlocksPerImage = layout.bytesPerImage / bytesPerTexel;
- layout.texelBlockCount = layout.byteLength / bytesPerTexel;
-
- return layout;
- }
} // namespace utils
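
The CreateBufferCopyView change above tracks the webgpu.h refactor that nests the offset/bytesPerRow/rowsPerImage triple inside a TextureDataLayout, so old call sites keep working. The two forms below are equivalent (a sketch; `buffer` is an existing wgpu::Buffer):

    wgpu::BufferCopyView viaHelper = utils::CreateBufferCopyView(
        buffer, /*offset=*/0, /*bytesPerRow=*/256, /*rowsPerImage=*/1);

    wgpu::BufferCopyView manual = {};
    manual.buffer = buffer;
    manual.layout = utils::CreateTextureDataLayout(/*offset=*/0,
                                                   /*bytesPerRow=*/256,
                                                   /*rowsPerImage=*/1);
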
diff --git a/chromium/third_party/dawn/src/utils/WGPUHelpers.h b/chromium/third_party/dawn/src/utils/WGPUHelpers.h
index fdbd8648f77..b77b6a438bc 100644
--- a/chromium/third_party/dawn/src/utils/WGPUHelpers.h
+++ b/chromium/third_party/dawn/src/utils/WGPUHelpers.h
@@ -22,6 +22,7 @@
#include <vector>
#include "common/Constants.h"
+#include "utils/TextureFormatUtils.h"
namespace utils {
@@ -33,6 +34,8 @@ namespace utils {
SingleShaderStage stage,
const char* source);
wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source);
+ wgpu::ShaderModule CreateShaderModuleFromWGSL(const wgpu::Device& device, const char* source);
+
std::vector<uint32_t> CompileGLSLToSpirv(SingleShaderStage stage, const char* source);
wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
@@ -51,9 +54,14 @@ namespace utils {
uint64_t offset,
uint32_t bytesPerRow,
uint32_t rowsPerImage);
- wgpu::TextureCopyView CreateTextureCopyView(wgpu::Texture texture,
- uint32_t level,
- wgpu::Origin3D origin);
+ wgpu::TextureCopyView CreateTextureCopyView(
+ wgpu::Texture texture,
+ uint32_t level,
+ wgpu::Origin3D origin,
+ wgpu::TextureAspect aspect = wgpu::TextureAspect::All);
+ wgpu::TextureDataLayout CreateTextureDataLayout(uint64_t offset,
+ uint32_t bytesPerRow,
+ uint32_t rowsPerImage);
struct ComboRenderPassDescriptor : public wgpu::RenderPassDescriptor {
public:
@@ -66,7 +74,7 @@ namespace utils {
std::array<wgpu::RenderPassColorAttachmentDescriptor, kMaxColorAttachments>
cColorAttachments;
- wgpu::RenderPassDepthStencilAttachmentDescriptor cDepthStencilAttachmentInfo;
+ wgpu::RenderPassDepthStencilAttachmentDescriptor cDepthStencilAttachmentInfo = {};
};
struct BasicRenderPass {
@@ -129,28 +137,6 @@ namespace utils {
const wgpu::BindGroupLayout& layout,
std::initializer_list<BindingInitializationHelper> entriesInitializer);
- struct BufferTextureCopyLayout {
- uint64_t byteLength;
- uint64_t texelBlockCount;
- uint32_t bytesPerRow;
- uint32_t texelBlocksPerRow;
- uint32_t bytesPerImage;
- uint32_t texelBlocksPerImage;
- wgpu::Extent3D mipSize;
- };
-
- uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width);
- uint32_t GetBytesInBufferTextureCopy(wgpu::TextureFormat format,
- uint32_t width,
- uint32_t bytesPerRow,
- uint32_t rowsPerImage,
- uint32_t copyArrayLayerCount);
- BufferTextureCopyLayout GetBufferTextureCopyLayoutForTexture2DAtLevel(
- wgpu::TextureFormat format,
- wgpu::Extent3D textureSizeAtLevel0,
- uint32_t mipmapLevel,
- uint32_t rowsPerImage);
-
} // namespace utils
#endif // UTILS_DAWNHELPERS_H_
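
As on the dawn_native side, the defaulted aspect argument keeps existing call sites unchanged while letting depth/stencil copy tests be explicit. A sketch, where `depthTexture` is an existing wgpu::Texture:

    wgpu::TextureCopyView depthSrc = utils::CreateTextureCopyView(
        depthTexture, /*level=*/0, /*origin=*/{0, 0, 0},
        wgpu::TextureAspect::DepthOnly);
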
diff --git a/chromium/third_party/dawn/src/utils/WindowsDebugLogger.cpp b/chromium/third_party/dawn/src/utils/WindowsDebugLogger.cpp
new file mode 100644
index 00000000000..76c9faa81a8
--- /dev/null
+++ b/chromium/third_party/dawn/src/utils/WindowsDebugLogger.cpp
@@ -0,0 +1,104 @@
+// Copyright 2020 The Dawn Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "utils/PlatformDebugLogger.h"
+
+#include "common/Assert.h"
+#include "common/windows_with_undefs.h"
+
+#include <array>
+#include <thread>
+
+namespace utils {
+
+ class WindowsDebugLogger : public PlatformDebugLogger {
+ public:
+ WindowsDebugLogger() : PlatformDebugLogger() {
+ if (IsDebuggerPresent()) {
+ // This condition is true when running inside Visual Studio or some other debugger.
+ // Messages are already printed there so we don't need to do anything.
+ return;
+ }
+
+ mShouldExitHandle = CreateEventA(nullptr, TRUE, FALSE, nullptr);
+ ASSERT(mShouldExitHandle != nullptr);
+
+ mThread = std::thread(
+ [](HANDLE shouldExit) {
+ // See https://blogs.msdn.microsoft.com/reiley/2011/07/29/a-debugging-approach-to-outputdebugstring/
+ // for the layout of this struct.
+ struct {
+ DWORD process_id;
+ char data[4096 - sizeof(DWORD)];
+ }* dbWinBuffer = nullptr;
+
+ HANDLE file = CreateFileMappingA(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE,
+ 0, sizeof(*dbWinBuffer), "DBWIN_BUFFER");
+ ASSERT(file != nullptr);
+ ASSERT(file != INVALID_HANDLE_VALUE);
+
+ dbWinBuffer = static_cast<decltype(dbWinBuffer)>(
+ MapViewOfFile(file, SECTION_MAP_READ, 0, 0, 0));
+ ASSERT(dbWinBuffer != nullptr);
+
+ HANDLE dbWinBufferReady =
+ CreateEventA(nullptr, FALSE, FALSE, "DBWIN_BUFFER_READY");
+ ASSERT(dbWinBufferReady != nullptr);
+
+ HANDLE dbWinDataReady = CreateEventA(nullptr, FALSE, FALSE, "DBWIN_DATA_READY");
+ ASSERT(dbWinDataReady != nullptr);
+
+ std::array<HANDLE, 2> waitHandles = {shouldExit, dbWinDataReady};
+ while (true) {
+ SetEvent(dbWinBufferReady);
+ DWORD wait = WaitForMultipleObjects(waitHandles.size(), waitHandles.data(),
+ FALSE, INFINITE);
+ if (wait == WAIT_OBJECT_0) {
+ break;
+ }
+ ASSERT(wait == WAIT_OBJECT_0 + 1);
+ fprintf(stderr, "%.*s\n", static_cast<int>(sizeof(dbWinBuffer->data)),
+ dbWinBuffer->data);
+ fflush(stderr);
+ }
+
+ CloseHandle(dbWinDataReady);
+ CloseHandle(dbWinBufferReady);
+ UnmapViewOfFile(dbWinBuffer);
+ CloseHandle(file);
+ },
+ mShouldExitHandle);
+ }
+
+ ~WindowsDebugLogger() override {
+ if (mShouldExitHandle != nullptr) {
+ ASSERT(SetEvent(mShouldExitHandle) != 0);
+ CloseHandle(mShouldExitHandle);
+ }
+
+ if (mThread.joinable()) {
+ mThread.join();
+ }
+ }
+
+ private:
+ std::thread mThread;
+ HANDLE mShouldExitHandle = INVALID_HANDLE_VALUE;
+ };
+
+ PlatformDebugLogger* CreatePlatformDebugLogger() {
+ return new WindowsDebugLogger();
+ }
+
+} // namespace utils
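
For context on what the listener thread drains: OutputDebugStringA() is the producer side of the DBWIN protocol, filling DBWIN_BUFFER and signaling DBWIN_DATA_READY, which is how backend validation messages reach stderr when no debugger is attached. A trivial producer for exercising the logger:

    #include <windows.h>

    void EmitTestDebugMessage() {
        // Lands in dbWinBuffer->data above and is echoed to stderr.
        OutputDebugStringA("example debug message\n");
    }
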
diff --git a/chromium/third_party/dawn/third_party/.clang-format b/chromium/third_party/dawn/third_party/.clang-format
new file mode 100644
index 00000000000..9d159247d51
--- /dev/null
+++ b/chromium/third_party/dawn/third_party/.clang-format
@@ -0,0 +1,2 @@
+DisableFormat: true
+SortIncludes: false
diff --git a/chromium/third_party/dawn/third_party/gn/glfw/BUILD.gn b/chromium/third_party/dawn/third_party/gn/glfw/BUILD.gn
index a5a46412aca..ba2336fbd6a 100644
--- a/chromium/third_party/dawn/third_party/gn/glfw/BUILD.gn
+++ b/chromium/third_party/dawn/third_party/gn/glfw/BUILD.gn
@@ -134,7 +134,7 @@ static_library("glfw") {
"${glfw_dir}/src/nsgl_context.h",
"${glfw_dir}/src/nsgl_context.m",
]
- libs += [
+ frameworks = [
"Cocoa.framework",
"IOKit.framework",
"CoreFoundation.framework",
diff --git a/chromium/third_party/dawn/third_party/khronos/BUILD.gn b/chromium/third_party/dawn/third_party/khronos/BUILD.gn
index 52ce94bd53b..f9e6df25096 100644
--- a/chromium/third_party/dawn/third_party/khronos/BUILD.gn
+++ b/chromium/third_party/dawn/third_party/khronos/BUILD.gn
@@ -41,9 +41,7 @@ config("khronos_headers_public") {
}
source_set("khronos_platform") {
- sources = [
- "KHR/khrplatform.h",
- ]
+ sources = [ "KHR/khrplatform.h" ]
public_configs = [ ":khronos_headers_public" ]
}